column              dtype          range
repo_name           stringlengths  7 - 71
file_path           stringlengths  5 - 118
context             list           -
import_statement    stringlengths  45 - 12.5k
token_num           int64          641 - 99.4k
cropped_code        stringlengths  44 - 17k
all_code            stringlengths  43 - 754k
next_line           stringlengths  2 - 330
gold_snippet_index  int64          0 - 68
created_at          stringlengths  25 - 25
level               stringclasses  9 values
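The columns above describe one row per code-completion example. As a minimal sketch of how such a row could be inspected, assuming the dump comes from a Hugging Face dataset (the dataset path below is a hypothetical placeholder, not given in this dump):

from datasets import load_dataset

# Hypothetical dataset path; replace with the actual repository this dump came from.
ds = load_dataset("user/repo-code-completion", split="train")

row = ds[0]
# Field names follow the schema listed above.
print(row["repo_name"], row["file_path"], row["token_num"], row["level"])
print("context snippets:", len(row["context"]))          # list of retrieved snippets
print("gold snippet index:", row["gold_snippet_index"])  # presumably an index into the context list
print("ground-truth next line:", row["next_line"])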
SqueezeBits/owlite
owlite/owlite.py
[ { "identifier": "OWLITE_DEVICE_NAME", "path": "owlite_core/cli/device.py", "snippet": "OWLITE_DEVICE_NAME = CONNECTED_DEVICE[\"device\"] if CONNECTED_DEVICE else None" }, { "identifier": "OWLITE_FRONT_BASE_URL", "path": "owlite_core/constants.py", "snippet": "OWLITE_FRONT_BASE_URL = \"https://owlite.ai\"" }, { "identifier": "OWLITE_REPO_PATH", "path": "owlite_core/constants.py", "snippet": "OWLITE_REPO_PATH = os.path.join(os.getenv(\"OWLITE_REPO_DIR\", os.path.join(os.getcwd(), \"owlite\")))" }, { "identifier": "OWLITE_REPORT_URL", "path": "owlite_core/constants.py", "snippet": "OWLITE_REPORT_URL = \"https://tally.so/r/mOl5Zk\"" }, { "identifier": "OWLITE_SETTINGS", "path": "owlite_core/owlite_settings.py", "snippet": "OWLITE_SETTINGS = OwLiteSettings()" }, { "identifier": "download_trt_engine", "path": "owlite/api/device/devices.py", "snippet": "def download_trt_engine(benchmark_key: str, path_to_save: str) -> None:\n \"\"\"Downloads built TensorRT engine.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n path_to_save (str): The path to save downloaded TensorRT engine.\n\n Raises:\n RuntimeError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Device is not set. Please set device and try again\")\n raise RuntimeError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n resp = DEVICE_API_BASE.post(\"/devices/trt\", json=payload)\n assert isinstance(resp, dict)\n\n file_url = resp[\"trt_engine_url\"]\n\n download_file_from_url(file_url, path_to_save)" }, { "identifier": "poll_run_benchmark", "path": "owlite/api/device/devices.py", "snippet": "def poll_run_benchmark(project_id: str, benchmark_key: str) -> None:\n \"\"\"Polls for TensorRT benchmark result.\n\n Args:\n project_id (str): The id of a project.\n benchmark_key (str): A key to identify benchmark job.\n\n Raises:\n ValueError: When unexpected signal is caught by SIGINT handler.\n RuntimeError: When error occurred during TensorRT execution.\n \"\"\"\n\n def sigint_handler(sig: signal.Signals, frame: Any) -> None:\n if sig != signal.SIGINT:\n raise ValueError(f\"Unexpected signals: {sig} (frame={frame})\")\n print(\"\")\n log.info(\n f\"Exit from current experiment. \"\n f\"Continue creating config at \"\n f\"{OWLITE_FRONT_BASE_URL}/project/detail/{project_id}\"\n )\n sys.exit(sig)\n\n original_sigint_handler = signal.signal(signal.SIGINT, sigint_handler) # type: ignore\n\n log.info(\"Polling for benchmark result, you are free to CTRL-C away\")\n\n count = 0\n info = get_benchmark_queue_info(benchmark_key)\n benchmark_status = info[\"benchmark_status\"]\n in_progress = (\n BenchmarkStatus.PRE_FETCHING.value,\n BenchmarkStatus.BENCHMARKING.value,\n )\n while True:\n if count % 5 == 0:\n info = get_benchmark_queue_info(benchmark_key)\n new_status = info[\"benchmark_status\"]\n\n if new_status < 0:\n print(\"\")\n log.error(\n \"Runtime error occurred during TensorRT engine execution or benchmark. Please try again. 
\"\n f\"If the problem persists, please report us at {OWLITE_REPORT_URL} for further assistance\"\n )\n raise RuntimeError(\"Benchmarking error\")\n\n if benchmark_status != new_status and new_status in in_progress:\n benchmark_status = new_status\n count = 0\n\n elif new_status == BenchmarkStatus.BENCHMARK_DONE.value:\n print(\"\\nBenchmarking done\")\n signal.signal(signal.SIGINT, original_sigint_handler)\n return\n\n if benchmark_status in in_progress:\n if benchmark_status == BenchmarkStatus.PRE_FETCHING.value and info[\"prefetch\"] is not None:\n message = f\"Your position in the queue: {info['prefetch']} {'. ' * (count % 4)}\"\n\n else:\n dots_before = \".\" * count\n owl_emoji = \"\\U0001F989\"\n dots_after = \".\" * (19 - count)\n\n message = f\"[{dots_before}{owl_emoji}{dots_after}]\"\n\n print(f\"\\r{message:<50}\", end=\"\", flush=True)\n\n count = (count + 1) % 20\n time.sleep(2)" }, { "identifier": "request_trt_benchmark", "path": "owlite/api/device/devices.py", "snippet": "def request_trt_benchmark(benchmark_key: str, bin_path: str) -> None:\n \"\"\"Uploads ONNX weight binary file and request TensorRT benchmark.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n bin_path (str): The path of a ONNX weight binary file.\n\n Raises:\n FileNotFoundError: When bin file does not exists at given path.\n ValueError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n\n if not os.path.exists(bin_path):\n log.error(\n f\"Unable to locate the ONNX bin file at the specified path: {bin_path}. \"\n \"Please ensure the file exists and the path is accurate. \"\n \"If the file is missing, recreate the ONNX file and retry\"\n )\n raise FileNotFoundError(\"ONNX bin file not found\")\n\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Connected device not found. 
Please connect device by 'owlite device connect'\")\n raise ValueError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n\n resp = DEVICE_API_BASE.post(\"/devices/jobs/export\", json=payload)\n assert isinstance(resp, dict)\n\n file_dest_url = resp[\"bin_file_url\"]\n\n file_upload_resp = upload_file_to_url(bin_path, file_dest_url)\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()" }, { "identifier": "get_configuration", "path": "owlite/api/dove/doves.py", "snippet": "def get_configuration(\n project_id: str,\n baseline_name: str,\n run_name: str,\n) -> str:\n \"\"\"Gets configuration options to apply.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a run.\n\n Returns:\n str: The compiled configuration string.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n resp = DOVE_API_BASE.post(\"/compile\", json=payload)\n assert isinstance(resp, dict)\n\n return json.dumps(resp)" }, { "identifier": "upload_baseline", "path": "owlite/api/dove/doves.py", "snippet": "def upload_baseline(\n project_id: str,\n baseline_name: str,\n onnx_path: str,\n model: GraphModule,\n) -> None:\n \"\"\"Uploads baseline's onnx proto and graph module.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n onnx_path (str): The path to baseline onnx proto file.\n model (GraphModule): The traced graph module.\n\n Raises:\n TypeError: When the `model` is not an instance of `torch.fx.GraphModule`.\n HTTPError: When the request was not successful.\n \"\"\"\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by upload_baseline, please use 'attribute' module to unwrap model \"\n f\"{_model_type}. 
Try owlite.api.dove.doves.upload_baseline(..., model = model.module)\"\n )\n raise TypeError(f\"{_model_type} is not supported by upload_baseline\")\n if not isinstance(model, GraphModule):\n raise TypeError(f\"model of upload_baseline must be GraphModule, but got {type(model)}\")\n\n proto = onnx.load(onnx_path, load_external_data=False)\n input_shape = json.dumps(extract_input_signature_from_onnx_proto(proto))\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"gm\": serialize(model),\n \"onnx\": base64.b64encode(proto.SerializeToString()),\n \"input_shape\": input_shape,\n }\n\n DOVE_API_BASE.post(\"/upload\", payload)" }, { "identifier": "check_baseline_existence", "path": "owlite/api/main/baselines.py", "snippet": "def check_baseline_existence(project_id: str, baseline_name: str) -> bool:\n \"\"\"Checks if baseline with given name exists at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name to check.\n\n Returns:\n bool: True if baseline exists in given project, False otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects/baselines/check\", json=payload)\n assert isinstance(resp, bool)\n\n return resp\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return False\n\n raise e" }, { "identifier": "create_baseline", "path": "owlite/api/main/baselines.py", "snippet": "def create_baseline(project_id: str, baseline_name: str) -> str:\n \"\"\"Creates a baseline experiment with given baseline name at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline to create.\n\n Returns:\n str: The name of created baseline.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/baselines\", json=payload)\n assert isinstance(resp, dict)\n\n return resp[\"baseline_name\"]" }, { "identifier": "create_or_load_project", "path": "owlite/api/main/projects.py", "snippet": "def create_or_load_project(project_name: str, description: str = \"\") -> str:\n \"\"\"Creates a project with given name and description and return the id of created project, if\n a project with given name already exists and accessible by current user, return the id of\n existing project.\n\n Args:\n project_name (str): The name of a project.\n description (str): The description of a project. 
Defaults to \"\".\n\n Returns:\n str: The id of a created project.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n log.debug(f\"Attempt creating project with name {project_name}.\")\n\n payload = {\n \"project_name\": project_name,\n \"description\": description,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects\", json=payload)\n\n assert isinstance(resp, dict) and resp[\"name\"] == project_name\n\n log.info(f\"Created new project '{project_name}'\")\n return resp[\"id\"]\n\n except HTTPError as err:\n if err.response is not None and err.response.status_code == 409:\n # project with given name already was created by user before\n\n data = json.loads(err.response.content)\n project_id = data[\"detail\"]\n\n log.debug(f\"Conflict detected, project with name {project_name} already exists, loading existing project.\")\n log.info(f\"Loaded existing project '{project_name}'\")\n return project_id\n\n raise err" }, { "identifier": "copy_run", "path": "owlite/api/main/runs.py", "snippet": "def copy_run(project_id: str, baseline_name: str, duplicate_from: str, run_name: str) -> str:\n \"\"\"Copies existing experiment and create a new experiment. Compression configuration is also cloned.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n duplicate_from (str): The name of an experiment to clone.\n run_name (str): The name of a new experiment.\n\n Returns:\n str: The name of a created experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": duplicate_from,\n \"new_run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/copy\", json=payload)\n assert isinstance(resp, dict)\n return str(resp[\"name\"])" }, { "identifier": "create_run", "path": "owlite/api/main/runs.py", "snippet": "def create_run(project_id: str, baseline_name: str, run_name: str) -> None:\n \"\"\"Creates an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a new experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n res = MAIN_API_BASE.post(\"/projects/runs\", json=payload)\n assert isinstance(res, dict)" }, { "identifier": "get_benchmark_key", "path": "owlite/api/main/runs.py", "snippet": "def get_benchmark_key(project_id: str, baseline_name: str, run_name: str) -> str:\n \"\"\"Gets a key to identify a benchmark job.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n str: A key to identify a benchmark job.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/keys\", json=payload)\n\n assert isinstance(resp, str)\n return resp" }, { "identifier": "get_run_info", "path": "owlite/api/main/runs.py", "snippet": "def get_run_info(project_id: str, baseline_name: str, run_name: str) -> Optional[dict]:\n \"\"\"Gets information of an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n Optional[dict]: The information of an 
experiment if exists, None otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n try:\n res = MAIN_API_BASE.post(\"/projects/runs/info\", json=payload)\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return None\n\n raise e\n\n assert isinstance(res, dict)\n return res" }, { "identifier": "update_run_info", "path": "owlite/api/main/runs.py", "snippet": "def update_run_info(\n project_id: str,\n baseline_name: str,\n run_name: str,\n logs: str,\n) -> None:\n \"\"\"Updates information for a specific experiment with model metrics.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n logs (str): Logs to be stored in the database.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"logs\": logs,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/update\", json=payload)\n assert isinstance(resp, str)" }, { "identifier": "upload_run_onnx_proto", "path": "owlite/api/main/runs.py", "snippet": "def upload_run_onnx_proto(\n project_id: str,\n baseline_name: str,\n run_name: str,\n onnx_path: str,\n dynamic_axes: Optional[dict[str, dict[int, dict[str, int]]]] = None,\n) -> None:\n \"\"\"Uploads experiment's onnx proto and graph module. Note that parameters are not uploaded.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n onnx_path (str): The path to experiment onnx proto file.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional): Dynamic axes setting,\n please refer to owlite.onnx.export for detail.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n input_signature = extract_input_signature_from_onnx_proto(onnx_path)\n if dynamic_axes is not None:\n new_input_signature = []\n for name, shape in input_signature:\n axis_setting = dynamic_axes.get(name)\n if axis_setting is not None:\n axis = next(iter(axis_setting))\n setting = axis_setting.get(axis)\n assert setting is not None\n range_setting = [\n setting.get(\"min\"),\n setting.get(\"opt\"),\n setting.get(\"max\"),\n setting.get(\"test\"),\n ]\n shape[axis] = range_setting # type: ignore\n new_input_signature.append((name, shape))\n input_signature = new_input_signature\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"input_shape\": json.dumps(input_signature),\n }\n\n file_dest_url = MAIN_API_BASE.post(\"/projects/runs/data/upload\", json=payload)\n\n assert file_dest_url is not None and isinstance(file_dest_url, str)\n file_upload_resp = upload_file_to_url(onnx_path, file_dest_url)\n\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()" }, { "identifier": "symbolic_trace", "path": "owlite/backend/fx/trace.py", "snippet": "def symbolic_trace(model: torch.nn.Module, *args: Tensor, **kwargs: dict[str, Any]) -> GraphModule:\n \"\"\"Like `torch.fx.symbolic_trace`, this function traces the input `model` to convert it into a GraphModule.\n In order for the tracing to be successful, the `model` must be able to pass `torch.compile(model, fullgraph=True)`.\n\n Args:\n model (torch.nn.Module): a torch.nn.Module instance.\n\n 
Raises:\n TypeError: if the `model` is not an instance of `torch.nn.Module`\n RuntimeError: if the tracing fails.\n\n Returns:\n GraphModule: the converted GraphModule.\n \"\"\"\n if not isinstance(model, torch.nn.Module):\n raise TypeError(f\"Expected torch.nn.Module instance but object of type {type(model)} given: {model}\")\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by symbolic trace, please use 'attribute' module to unwrap model \"\n f\"from {_model_type}. Try owlite.fx.symbolic_trace(model.module, ...)\"\n )\n raise TypeError(f\"{_model_type} is not supported by symbolic trace\")\n training_status = model.training\n # move input args and kwargs to model device\n device = get_most_common_device(model)\n dtype = get_most_common_floating_point_type(model)\n log.debug(f\"Tracing with device={device}, dtype={dtype}\")\n\n args = move_tensors_to(args, device, dtype)\n kwargs = move_tensors_to(kwargs, device, dtype)\n\n backend = BackendProvider()\n torch_dynamo.reset()\n optimized_model = torch.compile(model, fullgraph=True, backend=backend)\n output = optimized_model(*args, **kwargs)\n\n graph_module = backend.graph_module\n\n if graph_module is None:\n raise RuntimeError(\"Failed to create torch.fx.GraphModule while running optimized model\")\n\n graph_module = apply_graph_module_transforms(graph_module)\n graph_module = insert_output_adapter(graph_module, output)\n\n original_params = inspect.signature(model.forward).parameters\n graph_module_params = inspect.signature(graph_module.forward).parameters\n\n ignored_params = OrderedDict(\n filter(\n lambda item: (\n item[0] not in graph_module_params\n and item[1].kind\n not in (\n inspect._ParameterKind.VAR_POSITIONAL,\n inspect._ParameterKind.VAR_KEYWORD,\n )\n ),\n original_params.items(),\n )\n )\n if ignored_params:\n log.warning(\n \"The following parameters will be dropped from the graph module's forward method: \"\n f\"{', '.join(ignored_params)}\"\n )\n graph_module.train(training_status)\n graph_module.meta[\"owlite_status\"] = OwLiteStatus.NOT_COMPRESSED\n return graph_module" }, { "identifier": "configure_dynamic_dimensions", "path": "owlite/backend/onnx/dynamize.py", "snippet": "def configure_dynamic_dimensions(\n input_signature: list[tuple[str, Union[tuple[int, ...], str]]], dynamic_axes: dict[str, dict[int, dict[str, int]]]\n) -> DynamicDimensions:\n \"\"\"Configures dynamic dimension setting to be used by `dynamize` with given ONNX proto and dynamic axes setting.\n\n Args:\n input_signature (list[tuple[str, Union[tuple[int, ...], str]]]): A list of tuples mapping fx graph input names\n to their shape if they are torch.Tensor instances or to their class name otherwise.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional):\n To specify axes of tensors dynamic(i.e. 
known only at run-time), set `dynamic_axes` to a dict with schema:\n\n * KEY (str): an input name.\n\n * VALUE (dict[int, dict[str, int]]): a single item dictionary whose key is dynamic dimension of input\n and value is a dynamic range setting dictionary containing min, opt, max, test dimension size settings.\n\n Raises:\n ValueError: When dynamic ONNX proto is given or when invalid `dynamic_axes` is given.\n\n Returns:\n DynamicDimensions: Dynamic dimension setting to be used as an input of `dynamize`.\n \"\"\"\n\n if not check_dynamic_axes_setting(input_signature, dynamic_axes):\n raise ValueError(\"Invalid dynamic axes setting\")\n\n settings = {}\n dynamic_dim_size = None\n onnx_inputs_dict = dict(input_signature)\n for name, setting in dynamic_axes.items():\n dynamic_axis = next(iter(setting))\n\n shape = onnx_inputs_dict[name]\n assert shape is not None\n\n dynamic_dim_size = shape[dynamic_axis]\n\n min_val = setting[dynamic_axis].get(\"min\")\n max_val = setting[dynamic_axis].get(\"max\")\n opt_val = setting[dynamic_axis].get(\"opt\")\n opt_val = setting[dynamic_axis].get(\"test\")\n\n if dynamic_axis < 0:\n dynamic_axis = len(shape) + dynamic_axis\n\n settings[name] = DynamicSetting(shape, dynamic_axis, min_val, max_val, opt_val) # type: ignore\n\n assert dynamic_dim_size is not None and isinstance(dynamic_dim_size, int)\n return DynamicDimensions(dynamic_dim_size, settings)" }, { "identifier": "export", "path": "owlite/backend/onnx/export.py", "snippet": "def export(\n module: torch.nn.Module,\n args: Union[tuple[Any, ...], torch.Tensor],\n f: str,\n export_params: bool = True,\n verbose: bool = False,\n training: torch._C._onnx.TrainingMode = torch._C._onnx.TrainingMode.EVAL,\n input_names: Optional[Sequence[str]] = None,\n output_names: Optional[Sequence[str]] = None,\n operator_export_type: torch._C._onnx.OperatorExportTypes = torch._C._onnx.OperatorExportTypes.ONNX,\n opset_version: int = 17,\n do_constant_folding: bool = True,\n keep_initializers_as_inputs: Optional[bool] = None,\n custom_opsets: Optional[Mapping[str, int]] = None,\n export_modules_as_functions: Union[bool, Collection[type[torch.nn.Module]]] = False,\n use_fast_export: bool = True,\n apply_transforms: bool = True,\n simplify: bool = True,\n check_n: int = 1,\n skip_fuse_bn: bool = False,\n skipped_optimizers: Optional[list[str]] = None,\n dynamic_dimensions: Optional[DynamicDimensions] = None,\n) -> None:\n r\"\"\"Exports a model into ONNX format.\n\n Args:\n module (torch.nn.Module): The model to be exported.\n args (Union[tuple[Any, ...], torch.Tensor]): Argument of a `module`.\n\n args can be structured either as:\n\n 1. ONLY A TUPLE OF ARGUMENTS::\n\n args = (x, y, z)\n\n The tuple should contain model inputs such that `module(*args)` is a valid\n invocation of the model. Any non-Tensor arguments will be hard-coded into the\n exported model; any Tensor arguments will become inputs of the exported model,\n in the order they occur in the tuple.\n\n 2. A TENSOR::\n\n args = torch.Tensor([1])\n\n This is equivalent to a 1-ary tuple of that Tensor.\n\n 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::\n\n args = (\n x,\n {\n \"y\": input_y,\n \"z\": input_z\n }\n )\n\n All but the last element of the tuple will be passed as non-keyword arguments,\n and named arguments will be set from the last element. If a named argument is\n not present in the dictionary, it is assigned the default value, or None if a\n default value is not provided.\n\n .. 
note::\n If a dictionary is the last element of the args tuple, it will be\n interpreted as containing named arguments. In order to pass a dict as the\n last non-keyword arg, provide an empty dict as the last element of the args\n tuple. For example, instead of::\n\n export(\n module,\n (\n x,\n # WRONG: will be interpreted as named arguments\n {y: z}\n ),\n \"test.onnx.pb\"\n )\n\n Write::\n\n export(\n module,\n (\n x,\n {y: z},\n {}\n ),\n \"test.onnx.pb\"\n )\n f (str): A string containing a file name. A binary protocol buffer will be written to this file.\n export_params (bool, optional): If True, all parameters will\n be exported. Set this to False if you want to export an untrained model.\n In this case, the exported model will first take all of its parameters\n as arguments, with the ordering as specified by `module.state_dict().values()`. Defaults to True.\n verbose (bool, optional): If True, prints a description of the\n model being exported to stdout. In addition, the final ONNX graph will include the\n field `doc_string` from the exported model which mentions the source code locations\n for `module`. If True, ONNX exporter logging will be turned on. Defaults to False.\n training (torch._C._onnx.TrainingMode, optional): Defaults to torch._C._onnx.TrainingMode.EVAL.\n * `TrainingMode.EVAL`: export the model in inference mode.\n * `TrainingMode.PRESERVE`: export the model in inference mode if model.training is\n False and in training mode if model.training is True.\n * `TrainingMode.TRAINING`: export the model in training mode. Disables optimizations\n which might interfere with training.\n input_names (Optional[Sequence[str]], optional): Names to assign to the input nodes of the graph, in order.\n Names of `module.forward` arguments will be used when None is given. Defaults to None.\n output_names (Optional[Sequence[str]], optional): Names to assign to the output nodes of the graph, in order.\n Defaults to None.\n operator_export_type (torch._C._onnx.OperatorExportTypes, optional):\n Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`.\n * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain).\n * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops\n to standard ONNX ops in the default opset domain. If unable to do so\n (e.g. because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting the op into a custom opset domain without conversion. Applies\n to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_\n as well as ATen ops. For the exported model to be usable, the runtime must support\n these non-standard ops.\n * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace \"aten\")\n are exported as ATen ops (in opset domain \"org.pytorch.aten\").\n `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so\n this instructs the runtime to use PyTorch's implementation of these ops.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n\n This may be useful if the numeric differences in implementations of operators are\n causing large differences in behavior between PyTorch and Caffe2 (which is more\n common on untrained models).\n * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op\n (in the TorchScript namespace \"aten\") as a regular ONNX op. If we are unable to do so\n (e.g. 
because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for\n context.\n For example::\n\n graph(%0 : Float):\n %3 : int = prim::Constant[value=0]()\n # conversion unsupported\n %4 : Float = aten::triu(%0, %3)\n # conversion supported\n %5 : Float = aten::mul(%4, %0)\n return (%5)\n\n Assuming `aten::triu` is not supported in ONNX, this will be exported as::\n\n graph(%0 : Float):\n %1 : Long() = onnx::Constant[value={0}]()\n # not converted\n %2 : Float = aten::ATen[operator=\"triu\"](%0, %1)\n # converted\n %3 : Float = onnx::Mul(%2, %0)\n return (%3)\n\n If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then\n Caffe2-specific behavior will be enabled, including special support\n for ops are produced by the modules described in\n `Quantization <https://pytorch.org/docs/stable/quantization.html>`_.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n opset_version (int, optional): The version of the default (ai.onnx) opset\n <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18.\n Defaults to 17.\n do_constant_folding (bool, optional): Apply the constant-folding optimization.\n Constant-folding will replace some of the ops that have all constant inputs\n with pre-computed constant nodes. Defaults to True.\n keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers\n (typically corresponding to parameters) in the exported graph will also be added\n as inputs to the graph. If False, then initializers are not added as inputs to the\n graph, and only the non-parameter inputs are added as inputs. This may allow for\n better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None.\n custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema:\n\n * KEY (str): opset domain name\n * VALUE (int): opset version\n\n If a custom opset is referenced by ``model`` but not mentioned in this dictionary,\n the opset version is set to 1. Only custom opset domain name and version should be\n indicated through this argument. Defaults to None.\n export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable\n exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the\n particular types of modules to export as local functions in ONNX.\n This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because\n ``opset_version`` < 15 implies IR version < 8, which means no local function support.\n Module variables will be exported as function attributes. There are two categories of function\n attributes. Defaults to False.\n use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total\n parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary\n export process will be done using temporary files. Defaults to True.\n apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for\n model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False`\n is experimental and might yield unexpected behavior. Defaults to True.\n simplify (bool, optional): If True, onnx-simplifier will be run. 
If False, onnx-simplifier will be skipped.\n Defaults to True.\n check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the\n simplified ONNX proto after onnx-simplifier. Defaults to 1.\n skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion.\n Defaults to False.\n skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of\n onnx-simplifier passes to skip. Defaults to None.\n See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes.\n dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by\n `configure_dynamic_dimensions`. Defaults to None.\n\n Raises:\n TypeError: If `f` is not a string.\n ValueError: If the quantizer has invalid condition.\n `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.\n `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it\n uses an operator that is not supported by the exporter.\n `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.\n All errors are subclasses of :class:`errors.OnnxExporterError`.\n \"\"\"\n\n if not isinstance(f, str):\n raise TypeError(\"owlite.onnx.export requires the argument `f` to be a string.\")\n\n if isinstance(module, GraphModule):\n if module.meta[\"owlite_status\"] == OwLiteStatus.COMPRESSED:\n log.warning(\n \"This module has not yet been calibrated. \"\n \"The onnx that comes out of this module may have unexpected results in accuracy and latency.\"\n )\n\n clip_narrow_range_weights(module)\n # Batch Norm Fusing\n fuse_bn(module)\n\n # zero point folding\n fold_zp_to_bias(module)\n\n check_fake_quantization_condition(module)\n\n device = get_most_common_device(module)\n dtype = get_most_common_floating_point_type(module)\n args = move_tensors_to(args, device, dtype)\n\n size_in_gigabytes = sum(p.numel() * p.element_size() for p in module.parameters()) / (1 << 30)\n\n if size_in_gigabytes >= 2:\n log.warning(\n f\"Model has total parameter size larger than 2 GB ({size_in_gigabytes:.2f} GB).\"\n '\"use_fast_export\" will be set to False'\n )\n use_fast_export = False\n\n export_function, optimize_function = (_export, _optimize) if use_fast_export else (_export_path, _optimize_path)\n\n if opset_version is None:\n opset_version = 17\n\n if input_names is None and isinstance(module, GraphModule):\n input_names = get_default_input_names(module, args)\n onnx_proto = export_function(\n module,\n args=args,\n export_params=export_params,\n verbose=verbose,\n training=training,\n input_names=input_names,\n output_names=output_names,\n operator_export_type=operator_export_type,\n opset_version=opset_version,\n do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n custom_opsets=custom_opsets,\n export_modules_as_functions=export_modules_as_functions,\n )\n\n if skipped_optimizers is None:\n skipped_optimizers = [\"fuse_qkv\"]\n\n onnx_proto = optimize_function(\n onnx_proto,\n apply_transforms=apply_transforms,\n simplify=simplify,\n check_n=check_n,\n skip_fuse_bn=skip_fuse_bn,\n skipped_optimizers=skipped_optimizers,\n )\n\n if dynamic_dimensions is not None:\n onnx_proto = dynamize(onnx_proto, dynamic_dimensions)\n\n onnx_proto.producer_name = f\"owlite + {onnx_proto.producer_name}\"\n onnx_proto.doc_string = \"Processed by OwLite\"\n\n model_dir = os.path.dirname(f)\n name, _ = 
os.path.splitext(os.path.basename(f))\n location = f\"{name}.bin\"\n abs_location = os.path.join(model_dir, location)\n\n log.info(f\"Saving exported ONNX proto at {f} with external data {location}\")\n if model_dir:\n os.makedirs(model_dir, exist_ok=True)\n if abs_location is not None and os.path.isfile(abs_location):\n log.warning(f\"External data file at {abs_location} will be overwritten.\")\n # os.remove is required since onnx.save opens the external data file with mode='ab'\n os.remove(abs_location)\n onnx.save(\n onnx_proto,\n f,\n location=location,\n save_as_external_data=True,\n size_threshold=0,\n )" }, { "identifier": "get_input_shape_signature", "path": "owlite/backend/onnx/export.py", "snippet": "def get_input_shape_signature(\n module: torch.nn.Module, *args: Any, **kwargs: Any\n) -> list[tuple[str, Union[tuple[int, ...], str]]]:\n \"\"\"Maps the parameter names of a PyTorch module's forward method to the corresponding values' shapes or class name.\n\n This function returns a list of tuples, where each tuple contains a parameter name and its corresponding shape\n (as a tuple of integers) if the value is an instance of `torch.Tensor` or otherwise the name of the class of\n the value.\n\n Args:\n module (torch.nn.Module): The PyTorch module to inspect.\n args (Any): Positional arguments to be passed to the module.\n kwargs (Any): Keyword arguments to be passed to the module.\n\n Returns:\n list[tuple[str, Union[tuple[int, ...], str]]]: A list of tuples mapping parameter names to their shape\n (if they are torch.Tensor instances) or to their class name (for non-torch.Tensor instances).\n\n Note:\n This function assumes that `args` and `kwargs` match the signatures of the module's forward method exactly,\n in order and length. If they don't, the result may not be as expected or exceptions might occur.\n \"\"\"\n signature_map = map_signature(module.forward, *args, **kwargs)\n return [\n (\n name,\n tuple(value.shape) if isinstance(value, torch.Tensor) else value.__class__.__name__,\n )\n for name, value in signature_map\n ]" }, { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):" }, { "identifier": "ONNXExportOptions", "path": "owlite/options/onnx_export_options.py", "snippet": "class ONNXExportOptions:\n \"\"\"\n Class handling options for ONNX export.\n\n OwLite internally imports the target model to ONNX during conversion or benchmarking.\n Users can set options for ONNX export using this class.\n \"\"\"\n\n opset_version: int = 17" }, { "identifier": "GraphQuantizationOptions", "path": "owlite/options/quantization_options.py", "snippet": "class GraphQuantizationOptions(OptionsDict):\n \"\"\"\n * Key (str): the name of a FX node\n * Value (NodeQuantizationOptions): node quantization options\n \"\"\"\n\n ValueType = NodeQuantizationOptions" }, { "identifier": "quantize", "path": "owlite/quantize.py", "snippet": "def quantize(model: GraphModule, options: GraphQuantizationOptions) -> GraphModule:\n \"\"\"Quantizes the model with the specification described in 
options.\n\n This function inserts quantizers with the quantization options specified in the options,\n substitutes them with the Quantized module, and performs post-processing. The linear module\n that quantizes the bias cannot fuse the batch norm after quantizing, so it proceeds to fuse\n the batch norm. Then, it fuses quantizers with the same quantization option that correspond\n to the same tensor in the original model.\n\n Args:\n model (GraphModule): The symbolic traced model to be quantized.\n options (GraphQuantizationOptions): Options specifying the quantization.\n\n Raises:\n TypeError: If model is not a instance of `GraphModule`.\n\n Returns:\n GraphModule: Quantized model.\n \"\"\"\n\n if not isinstance(model, GraphModule):\n raise TypeError(\"Only GraphModule instance can be quantized with `owlite.quantize`\")\n configure(model, options)\n fuse_linear_bn_with_quantized_bias(model)\n log.debug(\"Fusing the redundant quantizers.\")\n fuse_redundant_quantizers(model)\n enable_quantizers(model, True)\n return model" } ]
import json
import os
import torch
from dataclasses import asdict, dataclass
from typing import Any, Optional
from torch.fx import GraphModule  # type: ignore
from torch.nn.parallel import DataParallel, DistributedDataParallel
from owlite_core.cli.device import OWLITE_DEVICE_NAME
from owlite_core.constants import (
    OWLITE_FRONT_BASE_URL,
    OWLITE_REPO_PATH,
    OWLITE_REPORT_URL,
)
from owlite_core.owlite_settings import OWLITE_SETTINGS
from .api.device.devices import (
    download_trt_engine,
    poll_run_benchmark,
    request_trt_benchmark,
)
from .api.dove.doves import get_configuration, upload_baseline
from .api.main.baselines import check_baseline_existence, create_baseline
from .api.main.projects import create_or_load_project
from .api.main.runs import (
    copy_run,
    create_run,
    get_benchmark_key,
    get_run_info,
    update_run_info,
    upload_run_onnx_proto,
)
from .backend.fx.trace import symbolic_trace
from .backend.onnx.dynamize import configure_dynamic_dimensions
from .backend.onnx.export import export, get_input_shape_signature
from .logger import log
from .options import GraphQuantizationOptions, ONNXExportOptions
from .quantize import quantize
12,914
self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.bin", ) request_trt_benchmark(benchmark_key, bin_path) log.info("TensorRT engine execution and benchmark successfully requested") poll_run_benchmark(self.project_id, benchmark_key) exp_info = get_run_info(self.project_id, self.baseline_name, self.experiment_name) assert exp_info is not None if self.is_baseline: log.info( "Latency\n" f"\t\tBaseline - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tConfigure the quantization settings located at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) else: log.info( "Latency\n" f"\t\tConfigured - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tRetrieve the specifics of the experiment at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) engine_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.engine", ) download_trt_engine(benchmark_key, engine_path) def log(self, **kwargs) -> None: """Logs the model's metrics. Notes: Log metrics with OwLite like below ... owl = owlite.init(...) ... owl.log(accuracy=0.72, loss=1.2) Raises: TypeError: When data is not JSON serializable. """ try: logs = json.dumps(kwargs) except TypeError as e: log.error("Data is not JSON serializable") raise e update_run_info(self.project_id, self.baseline_name, self.experiment_name, logs) # pylint: disable-next=too-many-branches def init( project: str, baseline: str, experiment: Optional[str] = None, duplicate_from: Optional[str] = None, description: str = "", onnx_export_options: Optional[ONNXExportOptions] = None, ) -> OwLite: """Sets project, baseline and experiment information in DB to proper state and creates `OwLite` instance. Args: project (str): OwLite project name. baseline (str): OwLite baseline name. experiment (str, optional): OwLite experiment name. Defaults to None. duplicate_from (str, optional): OwLite source experiment name. Defaults to None. description (str, optional): OwLite project description. Defaults to "". onnx_export_options (ONNXExportOptions, optional): Options for ONNX export. Defaults to None. Raises: RuntimeError: When not authenticated. ValueError: When invalid experiment name or baseline name is given. Returns: OwLite: Created `OwLite` instance. """ if OWLITE_SETTINGS.tokens is None: log.error("Please log in using 'owlite login'. Account not found on this device") raise RuntimeError("OwLite token not found") if OWLITE_DEVICE_NAME is None: log.warning("Connected device not found. Please connect device by 'owlite device connect --name (name)'") else: log.info(f"Connected device: {OWLITE_DEVICE_NAME}") if experiment == baseline: log.error(f"Experiment name '{baseline}' is reserved for baseline. Please try with a different experiment name") raise ValueError("Invalid experiment name") dir_path = os.path.join( OWLITE_REPO_PATH, project, baseline, experiment or baseline, ) if os.path.exists(dir_path): log.warning(f"Existing local directory found at {dir_path}. Continuing this code will overwrite the data") else: os.makedirs(dir_path, exist_ok=True) log.info(f"Experiment data will be saved in {dir_path}") # create or load project project_id = create_or_load_project(project, description) if experiment is None: if duplicate_from: log.warning(f"duplicate_from='{duplicate_from}' will be ignored as no value for experiment was provided")
# type: ignore """OwLite Optimization Module This module facilitates optimization and benchmarking of models using OwLite services.""" @dataclass class OwLite: """Class handling OwLite project, baseline, and experiment configurations. The OwLite class manages project, baseline, and experiment configurations within the OwLite system. It allows users to create or load projects, set baselines, create or duplicate experiments, convert models, and benchmark models against the specified configurations. """ project_id: str project_name: str baseline_name: str experiment_name: str onnx_export_options: ONNXExportOptions module_args: Optional[tuple[Any, ...]] = None module_kwargs: Optional[dict[str, Any]] = None @property def is_baseline(self) -> bool: # pylint: disable=missing-function-docstring return self.baseline_name == self.experiment_name def convert(self, model: torch.nn.Module, *args, **kwargs) -> GraphModule: """Converts input model to compressed model. Args: model (torch.nn.Module): Model to compress. Returns: GraphModule: Compressed graph module. Raises: HTTPError: When request for compression configuration was not successful. """ log.info("Model conversion initiated") try: model = symbolic_trace(model, *args, **kwargs) except Exception as e: # pylint: disable=broad-exception-caught log.error( "Failed to extract the computation graph from the provided model. " "Please check the error message for details.\n" "If the issue persists, try replacing with a traceable node. " "In case the problem remain unresolved, kindly report it at " f"{OWLITE_REPORT_URL} for further assistance" ) raise e self.module_args = args self.module_kwargs = kwargs if self.is_baseline: onnx_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.onnx", ) export( model, (*self.module_args, self.module_kwargs), onnx_path, **asdict(self.onnx_export_options), ) log.info("Baseline ONNX saved") upload_baseline(self.project_id, self.baseline_name, onnx_path, model) log.info("Uploaded the model excluding parameters") else: exp_info = get_run_info(self.project_id, self.baseline_name, self.experiment_name) assert exp_info is not None if not exp_info["config_id"]: log.warning("No compression configuration found, skipping the compression process") else: log.info(f"Compression configuration found for '{self.experiment_name}'") configuration_string = get_configuration(self.project_id, self.baseline_name, self.experiment_name) options = GraphQuantizationOptions.load(configuration_string) log.info("Applying compression configuration") model = quantize(model, options) log.info("Converted the model") return model def benchmark( self, model: GraphModule, dynamic_axes: Optional[dict[str, dict[int, dict[str, int]]]] = None, ) -> None: """Benchmarks given model. Args: model (GraphModule): Model to benchmark. dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]]): By default the exported model will have the shapes of all input tensors set to exactly match those given when calling convert. To specify axes of tensors as dynamic (i.e. known only at run-time), set `dynamic_axes` to a dict with schema: * KEY (str): an input name. * VALUE (dict[int, dict[str, int]]): a single item dictionary whose key is dynamic dimension of input and value is a dynamic range setting dictionary containing min, opt, max, test dimension size settings. For example:: import owlite owl = owlite.init( ... 
) class SumModule(torch.nn.Module): def forward(self, x): return torch.sum(x, dim=1) model = owl.convert( ... ) ... # set first(0-th) dimension of input x to be dynamic within the range of 1 ~ 8 # optimize for 4 and benchmark for 5 owl.benchmark(model, dynamic_axes={ "x": { 0: { "min": 1, "opt": 4, "max": 8, "test": 5, } } }) Raises: TypeError: When the `model` is an instance of `torch.nn.DataParallel` or `torch.nn.DistributedDataParallel`. RuntimeError: When `dynamic_axes` is set for baseline benchmark. ValueError: When invalid `dynamic_axes` is given. """ if isinstance(model, (DataParallel, DistributedDataParallel)): _model_type = f"torch.nn.parallel.{type(model).__name__}" log.error( f"{_model_type} is not supported by benchmark, please use attribute module " f"to unwrap model from {_model_type}. Try owlite.benchmark(model.module)" ) raise TypeError(f"{_model_type} is not supported by benchmark") if self.is_baseline: log.info( f"Benchmark initiated. '{self.baseline_name}' " "ONNX will be uploaded to the connected device for TensorRT execution and benchmark" ) if dynamic_axes is not None: log.error( "Baseline cannot be done with dynamic input. To benchmark baseline model with dynamic input, " "please create a run without compression configuration and benchmark that run with dynamic input" ) raise RuntimeError("Attempted dynamic baseline benchmark") else: log.info( f"Benchmark initiated. '{self.experiment_name}' " "ONNX will be created and uploaded to the connected device for TensorRT execution and benchmark" ) dynamic_dimensions = None if dynamic_axes is not None: sep = "', '" log.info(f"dynamic_axes setting for following inputs are provided. '{sep.join(dynamic_axes.keys())}'") input_signature = get_input_shape_signature( model, *(self.module_args or ()), **(self.module_kwargs or {}) ) dynamic_dimensions = configure_dynamic_dimensions(input_signature, dynamic_axes) onnx_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.onnx", ) export( model, (*(self.module_args or ()), self.module_kwargs), onnx_path, **asdict(self.onnx_export_options), dynamic_dimensions=dynamic_dimensions, ) log.info("Experiment ONNX saved") upload_run_onnx_proto(self.project_id, self.baseline_name, self.experiment_name, onnx_path, dynamic_axes) log.info("Uploaded the model excluding parameters") benchmark_key = get_benchmark_key(self.project_id, self.baseline_name, self.experiment_name) bin_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.bin", ) request_trt_benchmark(benchmark_key, bin_path) log.info("TensorRT engine execution and benchmark successfully requested") poll_run_benchmark(self.project_id, benchmark_key) exp_info = get_run_info(self.project_id, self.baseline_name, self.experiment_name) assert exp_info is not None if self.is_baseline: log.info( "Latency\n" f"\t\tBaseline - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tConfigure the quantization settings located at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) else: log.info( "Latency\n" f"\t\tConfigured - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tRetrieve the specifics of the experiment at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) engine_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, 
f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.engine", ) download_trt_engine(benchmark_key, engine_path) def log(self, **kwargs) -> None: """Logs the model's metrics. Notes: Log metrics with OwLite like below ... owl = owlite.init(...) ... owl.log(accuracy=0.72, loss=1.2) Raises: TypeError: When data is not JSON serializable. """ try: logs = json.dumps(kwargs) except TypeError as e: log.error("Data is not JSON serializable") raise e update_run_info(self.project_id, self.baseline_name, self.experiment_name, logs) # pylint: disable-next=too-many-branches def init( project: str, baseline: str, experiment: Optional[str] = None, duplicate_from: Optional[str] = None, description: str = "", onnx_export_options: Optional[ONNXExportOptions] = None, ) -> OwLite: """Sets project, baseline and experiment information in DB to proper state and creates `OwLite` instance. Args: project (str): OwLite project name. baseline (str): OwLite baseline name. experiment (str, optional): OwLite experiment name. Defaults to None. duplicate_from (str, optional): OwLite source experiment name. Defaults to None. description (str, optional): OwLite project description. Defaults to "". onnx_export_options (ONNXExportOptions, optional): Options for ONNX export. Defaults to None. Raises: RuntimeError: When not authenticated. ValueError: When invalid experiment name or baseline name is given. Returns: OwLite: Created `OwLite` instance. """ if OWLITE_SETTINGS.tokens is None: log.error("Please log in using 'owlite login'. Account not found on this device") raise RuntimeError("OwLite token not found") if OWLITE_DEVICE_NAME is None: log.warning("Connected device not found. Please connect device by 'owlite device connect --name (name)'") else: log.info(f"Connected device: {OWLITE_DEVICE_NAME}") if experiment == baseline: log.error(f"Experiment name '{baseline}' is reserved for baseline. Please try with a different experiment name") raise ValueError("Invalid experiment name") dir_path = os.path.join( OWLITE_REPO_PATH, project, baseline, experiment or baseline, ) if os.path.exists(dir_path): log.warning(f"Existing local directory found at {dir_path}. Continuing this code will overwrite the data") else: os.makedirs(dir_path, exist_ok=True) log.info(f"Experiment data will be saved in {dir_path}") # create or load project project_id = create_or_load_project(project, description) if experiment is None: if duplicate_from: log.warning(f"duplicate_from='{duplicate_from}' will be ignored as no value for experiment was provided")
created_baseline = create_baseline(project_id, baseline)
11
2023-12-08 06:41:50+00:00
16k
qitan/devops-backend-lite
apps/deploy/ext_func.py
[ { "identifier": "convert_xml_to_str_with_pipeline", "path": "common/custom_format.py", "snippet": "def convert_xml_to_str_with_pipeline(xml, url, secret, desc, jenkinsfile, scm=True):\n \"\"\"\n scm\n True: jenkinsfile为指定的git地址\n False: jenkinsfile为具体的pipeline\n \"\"\"\n xml_dict = xmltodict.parse(xml)\n if scm:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsScmFlowDefinition'\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'url'] = url\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'credentialsId'] = secret\n xml_dict['flow-definition']['definition']['scriptPath'] = jenkinsfile\n else:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition'\n xml_dict['flow-definition']['definition']['script'] = jenkinsfile\n xml_dict['flow-definition']['definition']['sandbox'] = 'true'\n xml_dict['flow-definition']['description'] = desc\n result = xmltodict.unparse(\n xml_dict, short_empty_elements=True, pretty=True)\n return result" }, { "identifier": "AesCipher", "path": "common/utils/AesCipher.py", "snippet": "class AesCipher(object):\n def __init__(self, secret_key='Devops SecretKey'):\n self.__secret_key = secret_key\n self.__aes = AES.new(str.encode(self.__secret_key), AES.MODE_ECB)\n\n def encrypt(self, data):\n while len(data) % 16 != 0: # 补足字符串长度为16的倍数\n data += (16 - len(data) % 16) * chr(16 - len(data) % 16)\n cipher_data = str(base64.encodebytes(self.__aes.encrypt(str.encode(data))), encoding='utf8').replace('\\n', '')\n return cipher_data\n\n def decrypt(self, cipher_data):\n try:\n decrypted_text = self.__aes.decrypt(base64.decodebytes(bytes(cipher_data, encoding='utf8'))).decode(\"utf8\")\n decrypted_text = decrypted_text[:-ord(decrypted_text[-1])] # 去除多余补位\n return decrypted_text\n except BaseException as e:\n print('data', e)\n raise Exception(e)" }, { "identifier": "CI_LATEST_KEY", "path": "common/variables.py", "snippet": "CI_LATEST_KEY = 'ci:deploy:latest::' # {CI_LATEST_KEY}{appinfo.id}" }, { "identifier": "JenkinsBuild", "path": "qtasks/tasks_build.py", "snippet": "class JenkinsBuild(object):\n JOB_MODEL = {'app': BuildJob}\n\n def __init__(self, url, username, password, job_id=0, appinfo_id=0, job_type='app'):\n self.__url = url\n self.__username = username\n self.__password = password\n self.__job_id = job_id\n self.__appinfo_id = int(appinfo_id) if appinfo_id else 0\n self.__job_type = job_type\n self.jenkins_cli = GlueJenkins(\n self.__url, self.__username, self.__password)\n\n def job_info(self):\n try:\n job = self.JOB_MODEL[self.__job_type].objects.filter(\n pk=self.__job_id).first()\n if self.__appinfo_id != 0:\n appinfo_obj = AppInfo.objects.get(id=self.__appinfo_id)\n job_name = appinfo_obj.jenkins_jobname\n return job_name, job, appinfo_obj\n job_name = f'jar-dependency-deploy-job-{job.name}'\n return job_name, job, None\n except BaseException as e:\n return None, None, None\n\n def log_stage(self):\n job_name, job, _ = self.job_info()\n try:\n flow_json = self.jenkins_cli.get_flow_detail(\n job_name, build_number=job.build_number)\n flow_json['data'] = {'job_id': 0}\n except BaseException as e:\n pass\n r = {'SUCCESS': 1, 'FAILED': 2, 'ABORTED': 4, 'FAILURE': 2}\n if flow_json['status'] in r:\n # 当前状态不在 {'IN_PROGRESS': 3, 'NOT_EXECUTED': 5}\n return True, flow_json\n return False, flow_json\n\n def log_console(self):\n job_name, job, _ = 
self.job_info()\n try:\n flow_json = self.jenkins_cli.get_build_console_output(\n job_name, number=job.build_number)\n except BaseException as e:\n pass\n flow_info = self.jenkins_cli.get_build_info(job_name, job.build_number)\n if flow_info['result']:\n return True, {'status': flow_info['result'], 'data': flow_json}\n return False, {'status': flow_info['result'], 'data': flow_json}\n\n def queue(self, queue_number):\n count = 0\n _flag = True\n while _flag:\n queue_item = self.jenkins_cli.get_queue_item(queue_number)\n if 'executable' in queue_item:\n if queue_item['executable'].get('number', None):\n return True, queue_item['executable']['number']\n count += 1\n if count > 600:\n _flag = False\n time.sleep(0.5)\n return False, 0\n\n def create(self, jenkinsfile='jardependency/Jenkinsfile', desc='Jar依赖包构建上传任务'):\n JENKINS_CONFIG = get_redis_data('cicd-jenkins')\n job_name, _, appinfo_obj = self.job_info()\n try:\n config_xml = convert_xml_to_str_with_pipeline(JENKINS_CONFIG['xml'],\n JENKINS_CONFIG['pipeline']['http_url_to_repo'],\n JENKINS_CONFIG['gitlab_credit'],\n desc,\n jenkinsfile,\n scm=True)\n if not self.jenkins_cli.job_exists(job_name):\n self.jenkins_cli.create_job(\n name=job_name, config_xml=config_xml)\n else:\n self.jenkins_cli.reconfig_job(\n name=job_name, config_xml=config_xml)\n return True, None\n except Exception as err:\n logger.exception(f'创建Jenkins任务[{job_name}]失败, 原因: {err}')\n return False, f'创建Jenkins任务[{job_name}]失败, 原因: {err}'\n\n def build(self, params):\n job_name, job, _ = self.job_info()\n try:\n if self.jenkins_cli.get_job_info(job_name).get('inQueue'):\n logger.info(f'构建失败, 原因: Jenkins job 排队中 请稍后再试')\n return False, 'Jenkins job 排队中 请稍后再试'\n except Exception as err:\n logger.error(f'获取构建的Jenkins任务[{job_name}]失败, 原因: {err}')\n return False, '获取Jenkins JOB失败,可能job不存在或者jenkins未运行,联系运维检查!'\n try:\n queue_number = self.jenkins_cli.build_job(\n job_name, parameters=params)\n if queue_number:\n return True, queue_number, job\n return False, None, 'Jenkins任务异常.'\n except BaseException as e:\n logger.exception(f'构建异常,原因:{e}')\n return False, None, f'构建异常,原因:{e}'\n\n def exists(self):\n job_name, job, appinfo_obj = self.job_info()\n return self.jenkins_cli.job_exists(job_name), job, appinfo_obj\n\n def create_view(self, appinfo_obj: AppInfo, env: Environment):\n view_xml_config = f'''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<hudson.model.ListView>\n <name>{appinfo_obj.app.project.alias}{env.alias}</name>\n <filterExecutors>false</filterExecutors>\n <filterQueue>false</filterQueue>\n <properties class=\"hudson.model.View$PropertyList\"/>\n <jobNames>\n <comparator class=\"hudson.util.CaseInsensitiveComparator\"/>\n </jobNames>\n <jobFilters/>\n <columns>\n <hudson.views.StatusColumn/>\n <hudson.views.WeatherColumn/>\n <hudson.views.JobColumn/>\n <jenkins.branch.DescriptionColumn/>\n <hudson.views.LastSuccessColumn/>\n <hudson.views.LastFailureColumn/>\n <hudson.views.LastDurationColumn/>\n <hudson.views.BuildButtonColumn/>\n </columns>\n <includeRegex>{env.name.lower()}-.*-{appinfo_obj.app.project.name.lower()}-.*</includeRegex>\n</hudson.model.ListView>'''\n self.jenkins_cli.create_view(\n f'{appinfo_obj.app.project.alias}{env.alias}', view_xml_config)\n\n def callback(self):\n \"\"\"\n Jenkins Pipeline构建结束回调平台,平台再获取构建结果入库\n :param appinfo_id: 应用模块ID\n :param build_number: 构建ID\n :return:\n \"\"\"\n job_name, job, appinfo_obj = self.job_info()\n if not job:\n return\n # 标记回调\n cache.set(f'{JENKINS_CALLBACK_KEY}{job.id}', 1, 60 * 60)\n time.sleep(1)\n flow_json 
= self.jenkins_cli.get_flow_detail(\n job_name, build_number=job.build_number)\n flow_console_output = self.jenkins_cli.get_build_console_output(\n job_name, number=job.build_number)\n if JENKINS_STATUS_MAP[flow_json['status']] == 3:\n # Jenkins构建成功后回调平台,等待平台响应,此时状态仍为3时,需要重新查询\n # 构建中再次查询\n async_task('qtasks.tasks_build.jenkins_callback_handle', **\n {'job_id': int(job.id), 'appinfo_id': appinfo_obj.id if appinfo_obj else 0, 'job_type': self.__job_type})\n return\n\n job.status = JENKINS_STATUS_MAP[flow_json['status']]\n job.save()\n\n if self.__job_type == 'app':\n # 应用构建\n if flow_json['status'] == 'SUCCESS' and appinfo_obj.environment.image_sync:\n try:\n # 当前构建结果成功, 可用镜像存入缓存\n _key = f\"{DEPLOY_IMAGE_KEY}{appinfo_obj.app.id}\"\n if appinfo_obj.app.multiple_ids:\n # 存在关联应用\n _key = f\"{DEPLOY_IMAGE_KEY}{'+'.join([str(i) for i in sorted(appinfo_obj.app.multiple_ids)])}\"\n jobs = cache.get(_key, [])\n _job_retain = int(get_datadict('JOB_RETAIN')['value']) if get_datadict(\n 'JOB_RETAIN') else 10\n if len(jobs) > _job_retain:\n jobs.pop()\n jobs.insert(0, job)\n # 缓存不过期\n cache.set(_key, jobs, 60 * 60 * 24 * 3)\n cache.set(f\"{_key}:{CI_LATEST_SUCCESS_KEY}\",\n job, 60 * 60 * 24 * 3)\n except BaseException as e:\n logger.exception(\n f\"应用[{appinfo_obj.uniq_tag}]构建缓存异常, 原因: {e}\")\n # 缓存最新构建记录\n try:\n cache.set(f\"{CI_LATEST_KEY}{appinfo_obj.id}\",\n job, 60 * 60 * 24 * 3)\n except BaseException as e:\n logger.exception('缓存最新构建异常', e)\n # 存储结果\n BuildJobResult.objects.create(\n **{'job_id': job.id, 'result': json.dumps(flow_json), 'console_output': flow_console_output})\n cache.set(f\"{CI_RESULT_KEY}{job.id}\",\n {'result': json.dumps(\n flow_json), 'console_output': flow_console_output},\n 60 * 5)\n # 构建完成通知\n self.notice(job, appinfo_obj, flow_json)\n return\n job.result = json.dumps(flow_json)\n job.console_output = flow_console_output\n job.save()\n\n def notice(self, job, appinfo_obj, flow_json):\n try:\n # 构建完成通知\n notify = appinfo_obj.app.project.notify\n if notify.get('mail', None) is None and notify.get('robot', None) is None:\n logger.info(f\"应用[{appinfo_obj.uniq_tag}]未启用消息通知.\")\n return\n # 通知消息key\n msg_key = f\"{MSG_KEY}{appinfo_obj.environment.name}:{appinfo_obj.app.appid}:{job.build_number}\"\n # 延时通知\n delay = 0.1\n title = f\"{appinfo_obj.app.appid}构建{flow_json['status']}\"\n try:\n git_commit_date = datetime.datetime.strptime(job.commits['committed_date'],\n \"%Y-%m-%dT%H:%M:%S.%f+00:00\").replace(\n tzinfo=datetime.timezone.utc).astimezone(pytz.timezone('Asia/Shanghai'))\n except:\n git_commit_date = job.commits['committed_date']\n msg = f'''**<font color=\"{JENKINS_COLOR_MAP[flow_json['status']]}\">{DataDict.objects.get(key=appinfo_obj.app.category).value} 构建 {flow_json['status']}</font>** \n项目: {appinfo_obj.app.project.alias} \n环境: {appinfo_obj.environment.name} \n应用ID: {appinfo_obj.app.appid} \n构建NO.: [{job.build_number}] \n{'构建分支模块' if appinfo_obj.app.category == 'category.front' else '构建分支'}: {job.commit_tag['label']}/{job.commit_tag['name']} {appinfo_obj.app.category == 'category.front' and job.modules} \n提交ID: {job.commits['short_id']} \n提交人: {job.commits['committer_name']} \n提交信息: {job.commits['message']} \n提交时间: {git_commit_date} \n构建类型: {'构建发布' if job.is_deploy else '构建'} \n构建者: {job.deployer.first_name or job.deployer.username} \n构建时间: {job.created_time.astimezone(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S+08:00')} \n '''\n if job.is_deploy:\n delay = get_datadict('NOTIFY_DELAY', 1)['delay'] if get_datadict(\n 'NOTIFY_DELAY', 1) else 60 * 
5\n if job.status != 1:\n # 构建结果不成功立即发出通知\n delay = 0.1\n if notify.get('mail', None):\n try:\n mail_send = OmsMail()\n recv_mail = job.deployer.email\n mail_send.deploy_notify(title, msg, recv_mail)\n except BaseException as e:\n logger.warning(f\"邮件发送失败, 原因: {e}\")\n # 机器人通知\n if notify.get('robot', None):\n try:\n robot = notify['robot']\n recv_phone = job.deployer.mobile\n recv_openid = job.deployer.feishu_openid\n cache.set(f\"{msg_key}:ci:{job.id}\",\n {'appid': appinfo_obj.app.appid, 'robot': robot, 'recv_phone': recv_phone,\n 'recv_openid': recv_openid, 'msg_key': msg_key,\n 'msg': msg,\n 'title': title}, 60 * 60 * 3)\n cache.set(f\"{DELAY_NOTIFY_KEY}{msg_key}\", {'curr_time': datetime.datetime.now(), 'delay': delay},\n 60 * 60 * 3)\n taskid = schedule('qtasks.tasks.deploy_notify_queue', *[msg_key],\n **{'appid': appinfo_obj.app.appid, 'robot': robot,\n 'recv_phone': recv_phone, 'recv_openid': recv_openid,\n 'msg_key': msg_key, 'title': title},\n schedule_type=Schedule.ONCE,\n next_run=datetime.datetime.now() + datetime.timedelta(seconds=delay))\n except BaseException as e:\n logger.warning(f\"机器人通知发送失败, 原因: {e}\")\n except BaseException as e:\n pass" }, { "identifier": "AppInfo", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "DevLanguage", "path": "dbapp/model/model_cmdb.py", "snippet": "class DevLanguage(TimeAbstract):\n name = models.CharField(max_length=100, unique=True, verbose_name='开发语言')\n alias = models.CharField(max_length=128, default='', verbose_name='别名')\n base_image = models.JSONField(default=dict, verbose_name='基础镜像',\n help_text='{\"project\": \"\", \"project_id\": \"\", \"image\": \"\", \"tag\": \"\"}')\n build = models.JSONField(default=dict, verbose_name='构建命令')\n dockerfile = models.TextField(\n null=True, blank=True, default='', verbose_name='Dockerfile模板')\n pipeline = models.TextField(\n null=True, blank=True, default='', verbose_name='流水线模板')\n template_k8s = models.TextField(null=True, blank=True, default='', verbose_name='Kubernetes模板',\n help_text='从数据字典接口获取,对应项的key为YAML')\n labels = models.JSONField(default=get_default_labels, verbose_name='标签',\n help_text='{label: [{\"name\": \"name\", \"value\": \"value\"}], selector: [{\"name\": \"name\", \"value\": \"value\"}], command: \"\"}')\n desc = models.TextField(verbose_name='描述', null=True, blank=True)\n\n def __str__(self) -> str:\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = True\n\n class Meta:\n db_table = 'cmdb_devlanguage'\n verbose_name = '开发语言'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "KubernetesDeploy", "path": "dbapp/model/model_cmdb.py", "snippet": "class KubernetesDeploy(TimeAbstract):\n appinfo = models.ForeignKey(\n AppInfo, related_name='app_info', null=True, on_delete=models.CASCADE)\n kubernetes = models.ForeignKey(\n KubernetesCluster, related_name='app_k8s', null=True, on_delete=models.CASCADE)\n online = models.SmallIntegerField(default=0, choices=G_ONLINE_CHOICE, verbose_name='是否上线',\n help_text=f'默认为0,即未上线\\n可选项: {G_ONLINE_CHOICE}')\n version = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='当前版本')\n\n def __str__(self):\n return '%s-%s' % (self.appinfo.app.appid, self.kubernetes.name)\n\n class Meta:\n db_table = 'cmdb_kubernetesdeploy'\n default_permissions = ()" }, { "identifier": "get_datadict", "path": "common/ext_fun.py", "snippet": "def get_datadict(name, config=0, default_value=None):\n \"\"\"\n 从数据字典获取数据\n \"\"\"\n try:\n qs = DataDict.objects.get(key=name)\n except BaseException as e:\n 
return default_value\n if config:\n ret = json.loads(qs.extra)\n else:\n ret = {'id': qs.id, 'key': qs.key,\n 'value': qs.value, 'desc': qs.desc}\n return ret" }, { "identifier": "get_redis_data", "path": "common/ext_fun.py", "snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret" }, { "identifier": "harbor_cli", "path": "common/ext_fun.py", "snippet": "def harbor_cli(namespace, **filters):\n try:\n harbor = SystemConfig.objects.filter(**filters).first()\n # 获取harbor配置\n harbor_config = json.loads(harbor.config)\n except BaseException as e:\n logger.exception(f'创建任务失败, 原因: 获取harbor仓库异常, {e}')\n return False, f\"获取harbor仓库异常:{e}\"\n # 构建前创建harbor项目\n cli = HarborAPI(url=harbor_config['url'], username=harbor_config['user'],\n password=harbor_config['password'])\n try:\n cli.create_project(\n namespace, public=harbor_config.get('public', False))\n except BaseException as e:\n pass\n return True, harbor_config" }, { "identifier": "template_generate", "path": "common/ext_fun.py", "snippet": "def template_generate(appinfo_obj: AppInfo, image=None, partial_deploy_replicas: int = 0):\n \"\"\"\n 生成Kubernetes Deployment Yaml\n \"\"\"\n\n def health_lifecycle_generate(item, enable=True):\n _c = {}\n for i in template[item]['data']:\n _x = {}\n if i.get('enable', enable):\n for j in i['items']:\n if '__' in j['name']:\n _t = j['name'].split('__')\n _value = j['value']\n if j['name'] == 'exec__command':\n _value = [\"sh\", \"-c\", j['value']]\n if _x.get(_t[0], None):\n _x[_t[0]][_t[1]] = _value\n else:\n _x[_t[0]] = {_t[1]: _value}\n else:\n _x[j['name']] = j['value']\n _c[i['name']] = _x\n return _c\n\n def container_generate(container_data):\n containers = []\n for i in container_data:\n if i.get('enable', None):\n container = get_datadict(i['key'], config=1)\n if not container:\n container = i['extra']\n containers.append(\n container)\n return containers\n language_obj = DevLanguage.objects.get(name=appinfo_obj.app.language)\n project_config = ProjectConfig.objects.filter(project_id=appinfo_obj.app.project.id,\n environment_id=appinfo_obj.environment.id)\n namespace = appinfo_obj.namespace\n harbor_config = get_redis_data('cicd-harbor')\n harbor_url = harbor_config['url'].split('://')[1]\n image = f\"{harbor_url}/{image}\"\n\n template = {}\n # 模板优先级\n # 应用模块 -> 应用 -> 项目 -> 环境\n if project_config.first():\n project_template = project_config.first().template\n for k, v in project_template.items():\n if v and isinstance(v, (dict,)):\n if v.get('custom', False) is False:\n if appinfo_obj.environment.template.get(k, None):\n template[k] = appinfo_obj.environment.template[k]\n else:\n if project_template.get(k, None):\n template[k] = project_template[k]\n\n microapp_template = appinfo_obj.app.template\n for k, v in microapp_template.items():\n if '_on' in k and v:\n _k = k.rstrip('_on')\n if microapp_template.get(_k, None):\n template[_k] = microapp_template[_k]\n use_host_network = False\n if appinfo_obj.template.get('userHostNetwork', 0):\n use_host_network = True\n for k, v in appinfo_obj.template.items():\n if v and isinstance(v, (dict,)):\n if v.get('custom', False) and appinfo_obj.template.get(k, None):\n template[k] = appinfo_obj.template[k]\n\n yaml_template = {'kind': 'Deployment', 'metadata': {}, 'spec':\n 
{'strategy': {}, 'template': {'metadata': {}, 'spec':\n {'containers': [{'ports': [{'containerPort': 8080}], 'resources': []}],\n 'imagePullSecrets': [{'name': 'loginharbor'}],\n 'terminationGracePeriodSeconds': 120}\n }\n }\n }\n\n try:\n tz = appinfo_obj.app.project.product.region.extra['timezone']\n except BaseException as e:\n tz = 'Asia/Shanghai'\n try:\n if template.get('strategy', None):\n for i in template['strategy']['data']:\n if i['key'] in ['maxSurge', 'maxUnavailable']:\n if yaml_template['spec']['strategy'].get('rollingUpdate', None) is None:\n yaml_template['spec']['strategy']['rollingUpdate'] = {}\n yaml_template['spec']['strategy']['rollingUpdate'][i['key']\n ] = f\"{i['value']}%\"\n else:\n yaml_template['spec'][i['key']] = i['value']\n _d = {}\n for i in template['resources']['data']:\n _t = i['key'].split('_')\n if _d.get(_t[0], None):\n _d[_t[0]][_t[1]] = f\"{i['value']}{i['slot']}\"\n else:\n _d[_t[0]] = {_t[1]: f\"{i['value']}{i['slot']}\"}\n yaml_template['spec']['template']['spec']['containers'][0]['resources'] = _d\n\n yaml_template['metadata']['name'] = appinfo_obj.app.name\n yaml_template['metadata']['namespace'] = namespace\n yaml_template['spec']['template']['spec']['containers'][0]['name'] = appinfo_obj.app.name\n yaml_template['spec']['template']['spec']['containers'][0]['image'] = image\n command = appinfo_obj.app.template.get(\n 'command', None) or language_obj.labels.get('command', None)\n if command:\n if command.startswith('./'):\n yaml_template['spec']['template']['spec']['containers'][0]['command'] = [\n command]\n else:\n yaml_template['spec']['template']['spec']['containers'][0]['command'] = [\n 'sh', '-c', command]\n\n # 优先级: 应用模块>应用>预设>开发语言\n labels = template['label']['data']\n labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])\n yaml_template['spec']['template']['metadata']['labels'] = {\n i['name']: i['value'] for i in labels}\n yaml_template['spec']['template']['metadata']['labels'][\n 'status-app-name-for-ops-platform'] = appinfo_obj.app.name\n yaml_template['spec']['selector'] = {\n 'matchLabels': {i['name']: i['value'] for i in labels}}\n\n selectors = template['selector']['data']\n yaml_template['spec']['template']['spec']['nodeSelector'] = {\n i['name']: i['value'] for i in selectors}\n\n if 'annotations' not in yaml_template['spec']['template']['metadata']:\n yaml_template['spec']['template']['metadata']['annotations'] = {}\n\n for i in template['prometheus']['data']:\n yaml_template['spec']['template']['metadata'][\n 'annotations'][f'prometheus.io/{i[\"name\"]}'] = i['value']\n if 'prometheus.io/path' in yaml_template['spec']['template']['metadata']['annotations']:\n yaml_template['spec']['template']['metadata']['annotations'][\n 'prometheus.io/app_product'] = appinfo_obj.app.project.product.name\n yaml_template['spec']['template']['metadata']['annotations'][\n 'prometheus.io/app_env'] = appinfo_obj.environment.name\n yaml_template['spec']['template']['metadata']['annotations'][\n 'prometheus.io/app_project'] = appinfo_obj.app.project.name\n\n # 环境变量\n envs = [{'name': 'TZ', 'value': tz}]\n envs.extend(template['env']['data'])\n envs.extend([\n {'name': '_RESTART', 'value': datetime.now().strftime(\n '%Y%m%d%H%M%S')}, # _RESTART变量用于强制更新deployment\n {'name': 'PRODUCT_NAME', 'value': appinfo_obj.app.project.product.name},\n {'name': 'PROJECT_NAME', 'value': appinfo_obj.app.project.name},\n {'name': 'APPNAME', 'value': appinfo_obj.app.name},\n {'name': 'APPID', 'value': appinfo_obj.app.appid},\n {'name': 'ENV', 'value': 
appinfo_obj.environment.name},\n {'name': 'POD_NAMESPACE', 'value': namespace}\n ])\n envs = list({i['name']: i for i in envs}.values())\n for i in envs:\n try:\n env_value = i.get('value', None)\n cmname = i.pop('cmname', None)\n cmkey = i.pop('cmkey', None)\n if env_value:\n env_value = env_value.lstrip('\"').rstrip(\n '\"').lstrip(\"'\").rstrip(\"'\")\n i.pop('value', None)\n i['name'] = i['name'].lstrip('\"').rstrip(\n '\"').lstrip(\"'\").rstrip(\"'\")\n if i.get('valueFrom', None) == 'configMapKeyRef':\n i['valueFrom'] = {'configMapKeyRef': {\n 'name': cmname, 'key': cmkey}}\n else:\n i['value'] = env_value\n i['valueFrom'] = None\n except BaseException as e:\n pass\n yaml_template['spec']['template']['spec']['containers'][0]['env'] = envs\n\n if template.get('health', False):\n _d = health_lifecycle_generate('health', True)\n for k, v in _d.items():\n yaml_template['spec']['template']['spec']['containers'][0][k] = v\n if template.get('lifecycle', False):\n yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'] = {\n }\n _d = health_lifecycle_generate('lifecycle', False)\n for k, v in _d.items():\n yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'][k] = v\n\n _vo_mount = [{'mountPath': '/data/logs',\n 'name': 'logs', 'readOnly': False}]\n _volumes = [{'name': 'logs', 'type': 'Directory', 'hostPath': {\n 'path': f'/data/{appinfo_obj.environment.name}-applogs/{appinfo_obj.app.project.name}/'}}]\n if template.get('storage', None):\n for k, v in template['storage']['data'].items():\n for i in v:\n _x = {}\n for m, n in i.items():\n if isinstance(n, (str,)):\n n = n.replace('${APPNAME}', appinfo_obj.app.name)\n if '_' in m:\n _t = m.split('_')\n if _x.get(_t[0], None):\n _x[_t[0]][_t[1]] = n\n else:\n _x[_t[0]] = {_t[1]: n}\n else:\n _x[m] = n\n _t = {'mountPath': _x['mount'], 'name': _x['name'],\n 'readOnly': True if _x.get('mode', None) == 'ReadOnly' else False}\n if _x.get('file', None):\n _t['subPath'] = _x['configMap']['items'][0]['key']\n _vo_mount.append(_t)\n _mode = _x.pop('mode', None)\n _x.pop('file', None)\n _x.pop('mount', None)\n if _x.get('configMap', None):\n _x['configMap']['defaultMode'] = 0o600 if _mode == 'ReadOnly' else 0o755\n _volumes.append(_x)\n yaml_template['spec']['template']['spec']['containers'][0]['volumeMounts'] = _vo_mount\n yaml_template['spec']['template']['spec']['volumes'] = _volumes\n if use_host_network:\n yaml_template['spec']['template']['spec']['hostNetwork'] = True\n partial_deploy_yaml_template = None\n\n except BaseException as e:\n logger.exception(f'generate yaml err {e.__class__} {e}')\n return {'ecode': 500, 'message': str(e)}\n\n # 多容器处理\n if appinfo_obj.template.get('containers_custom', None):\n containers = container_generate(\n appinfo_obj.template.get('containers', []))\n else:\n containers = container_generate(\n project_config.first().template.get('containers', []))\n yaml_template['spec']['template']['spec']['containers'].extend(containers)\n ret = {'ecode': 200, 'image': image, 'yaml': yaml_template}\n\n if partial_deploy_yaml_template:\n ret['partial_deploy_yaml'] = partial_deploy_yaml_template\n return ret" }, { "identifier": "BuildJob", "path": "dbapp/model/model_deploy.py", "snippet": "class BuildJob(TimeAbstract):\n \"\"\"\n 持续构建模型\n \"\"\"\n order_id = models.IntegerField(default=0, verbose_name='发布工单ID')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, db_index=True, 
verbose_name='应用模块ID')\n deployer = models.ForeignKey(UserProfile, verbose_name='发布人', blank=True, related_name='deployer', null=True,\n default=None, on_delete=models.SET_NULL)\n # {0: 未构建, 1: 构建成功, 2: 构建失败, 3: 构建中, 4: 作废}\n status = models.SmallIntegerField(default=3, choices=G_CI_STATUS, verbose_name=\"状态\",\n help_text=f\"状态值: {dict(G_CI_STATUS)}\")\n queue_number = models.IntegerField(default=0, verbose_name='队列ID')\n build_number = models.IntegerField(default=0, verbose_name='构建ID')\n commits = models.JSONField(default=dict, verbose_name='提交信息')\n commit_tag = models.JSONField(default=dict, verbose_name='提交类型',\n help_text='label可选: heads|tags\\nname: 具体的分支或者标签\\n{\"label\": \"heads\", \"name\": \"master\"}')\n # {0: 构建, 1: 构建发布}\n is_deploy = models.SmallIntegerField(default=0, verbose_name='构建发布',\n help_text='是否构建完后进行发布, {0: 不发布, 1: 发布}')\n jenkins_flow = models.TextField(\n verbose_name='jenkins pipeline', blank=True, null=True, default=\"\")\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n sync_status = models.SmallIntegerField(default=0, choices=G_IMAGE_SYNC_STAT, verbose_name='镜像同步状态',\n help_text=f\"{dict(G_IMAGE_SYNC_STAT)}, 默认0\")\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n batch_uuid = models.CharField(\n max_length=40, null=True, blank=True, verbose_name='批量部署标识')\n\n @property\n def job_name(self):\n try:\n appinfo_obj = AppInfo.objects.get(id=self.appinfo_id)\n job_name = f'{appinfo_obj.environment.name}-{appinfo_obj.app.category.split(\".\")[-1]}-{appinfo_obj.app.project.name}-{appinfo_obj.app.name.split(\".\")[-1]}'.lower(\n )\n except AppInfo.DoesNotExist:\n job_name = ''\n return job_name\n\n def __str__(self):\n return '%s-%s-%s' % (self.order_id, self.appinfo_id, self.image)\n\n class Meta:\n db_table = 'deploy_buildjob'\n default_permissions = ()\n ordering = ['-id']" }, { "identifier": "DeployJob", "path": "dbapp/model/model_deploy.py", "snippet": "class DeployJob(TimeAbstract):\n \"\"\"\n 持续部署模型\n \"\"\"\n uniq_id = models.CharField(\n max_length=250, unique=True, verbose_name='发布ID')\n order_id = models.CharField(max_length=40, null=True, blank=True, verbose_name=u'工单号',\n help_text='前端不需要传值')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, db_index=True, verbose_name='应用模块ID')\n deployer = models.ForeignKey(UserProfile, verbose_name='发布人', blank=True, related_name='cd_deployer', null=True,\n default=None, on_delete=models.SET_NULL)\n status = models.SmallIntegerField(default=0, choices=G_CD_STATUS, verbose_name=\"状态\",\n help_text=f'部署状态: {dict(G_CD_STATUS)}, 默认0')\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n kubernetes = models.JSONField(default=list, verbose_name='部署集群',\n help_text='待发布集群\\n格式为array, 存储集群id, eg: [1,2]')\n deploy_type = models.SmallIntegerField(default=0, choices=G_CD_TYPE, verbose_name='部署类型',\n help_text=f\"{dict(G_CD_TYPE)}, 默认0\")\n rollback_reason = models.SmallIntegerField(null=True, blank=True,\n verbose_name='回滚原因') # 具体类型查看 datadict 的 ROLLBACK_TYPE\n rollback_comment = models.TextField(\n null=True, blank=True, default='', verbose_name='回滚备注')\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n batch_uuid = models.CharField(\n max_length=40, null=True, blank=True, verbose_name='批量部署标识')\n\n @property\n def job_name(self):\n try:\n appinfo_obj = 
AppInfo.objects.get(id=self.appinfo_id)\n job_name = f'{appinfo_obj.environment}-{appinfo_obj.app.category.split(\".\")[-1]}-{appinfo_obj.app.project.name}-{appinfo_obj.app.name.split(\".\")[-1]}'.lower(\n )\n except AppInfo.DoesNotExist:\n job_name = ''\n return job_name\n\n def __str__(self) -> str:\n return self.uniq_id\n\n class Meta:\n db_table = 'deploy_deployjob'\n default_permissions = ()\n ordering = ['-id']" }, { "identifier": "PublishOrder", "path": "dbapp/model/model_deploy.py", "snippet": "class PublishOrder(TimeAbstract):\n \"\"\"\n 发布工单,关联工单审批\n \"\"\"\n order_id = models.CharField(\n max_length=40, unique=True, verbose_name=u'工单号', help_text='前端不需要传值')\n dingtalk_tid = models.CharField(max_length=250, default=None, null=True, blank=True, verbose_name='钉钉工单ID',\n help_text='填写钉钉流程单号, 可为空')\n title = models.CharField(default='', max_length=250, verbose_name=u'标题')\n category = models.SmallIntegerField(default=0, choices=G_TICKET_TYPE, verbose_name='发版类型',\n help_text=f'可选: {G_TICKET_TYPE}')\n creator = models.ForeignKey(UserProfile, null=True, related_name='publish_creator', on_delete=models.SET_NULL,\n verbose_name=u'工单创建人')\n node_name = models.CharField(\n max_length=50, blank=True, null=True, verbose_name='节点')\n content = models.TextField(default='', verbose_name=u'变更内容')\n formdata = models.JSONField(default=dict, verbose_name='上线表单')\n effect = models.TextField(blank=True, null=True, verbose_name=u'影响')\n environment = models.IntegerField(\n null=True, blank=True, verbose_name='应用环境', help_text=\"应用环境ID\")\n apps = models.ManyToManyField(\n PublishApp, related_name='publish_apps', verbose_name='待发布应用')\n app = models.JSONField(default=list, verbose_name='应用服务',\n help_text='工单未审核通过, 展示关联的待发布应用.\\n格式为数组, 存放应用ID, 如[1, 2]')\n # {0: 未构建, 1: 构建成功, 2: 构建失败, 3: 构建中, 4: 作废/中止}\n status = models.SmallIntegerField(default=0, choices=G_ORDER_STATUS, verbose_name='发布单状态',\n help_text=f'工单状态:\\n{G_ORDER_STATUS}')\n result = models.TextField(blank=True, null=True,\n verbose_name=u'处理结果', help_text='前端无需传值')\n expect_time = models.DateTimeField(\n verbose_name='期望发布时间', default=None, null=True)\n executor = models.ForeignKey(UserProfile, null=True, related_name='publish_executor', on_delete=models.SET_NULL,\n help_text='前端不需要传值')\n deploy_time = models.DateTimeField(\n verbose_name='发布时间', default=None, null=True)\n method = models.CharField(max_length=6, default='manual',\n verbose_name='发版方式', help_text='{manual: 手动, auto: 自动, plan: 定时}')\n team_members = models.JSONField(default=list, verbose_name='团队人员')\n extra_deploy_members = models.JSONField(\n default=list, verbose_name='额外指定发布人员')\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n db_table = 'deploy_publishorder'\n default_permissions = ()\n verbose_name = '发布工单'\n verbose_name_plural = verbose_name + '管理'\n ordering = ['-created_time']" }, { "identifier": "DataDict", "path": "dbapp/model/model_ucenter.py", "snippet": "class DataDict(CommonParent):\n key = models.CharField(max_length=80, unique=True, verbose_name='键')\n value = models.CharField(max_length=80, verbose_name='值')\n extra = models.TextField(null=True, blank=True,\n default='', verbose_name='额外参数')\n desc = models.CharField(max_length=255, blank=True,\n null=True, verbose_name='备注')\n\n def __str__(self):\n return self.value\n\n class Meta:\n db_table = 'ucenter_datadict'\n default_permissions = ()\n verbose_name = '字典'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "UserProfile", "path": "dbapp/model/model_ucenter.py", "snippet": 
"class UserProfile(TimeAbstract, AbstractUser):\n \"\"\"\n 用户信息\n \"\"\"\n mobile = models.CharField(max_length=11, null=True,\n blank=True, verbose_name=\"手机号码\")\n avatar = models.ImageField(upload_to=\"static/%Y/%m\", default=\"image/default.png\",\n max_length=250, null=True, blank=True)\n department = models.ManyToManyField(\n Organization, related_name='org_user', verbose_name='部门')\n position = models.CharField(\n max_length=50, null=True, blank=True, verbose_name=\"职能\")\n title = models.CharField(max_length=50, null=True,\n blank=True, verbose_name=\"职位\")\n leader_user_id = models.CharField(\n max_length=64, null=True, blank=True, verbose_name=\"直属领导ID\")\n roles = models.ManyToManyField(\n \"Role\", verbose_name=\"角色\", related_name='user_role', blank=True)\n dn = models.CharField(max_length=120, null=True,\n blank=True, unique=True, verbose_name=\"ldap dn\")\n is_ldap = models.BooleanField(default=False, verbose_name=\"是否ldap用户\")\n ding_userid = models.CharField(\n max_length=150, null=True, blank=True, verbose_name=\"钉钉用户ID\")\n feishu_userid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name=\"飞书UserID\")\n feishu_unionid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书UnionID')\n feishu_openid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书OpenID')\n\n @property\n def name(self):\n if self.first_name:\n return self.first_name\n if self.last_name:\n return self.last_name\n return self.username\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = False\n icon = 'peoples'\n\n class Meta:\n db_table = 'ucenter_userprofile'\n default_permissions = ()\n verbose_name = \"用户信息\"\n verbose_name_plural = verbose_name\n ordering = ['id']" }, { "identifier": "Workflow", "path": "dbapp/model/model_workflow.py", "snippet": "class Workflow(TimeAbstract):\n \"\"\"\n 工单\n \"\"\"\n\n class STATUS:\n close = '已关闭'\n revoke = '已撤回'\n reject = '被驳回'\n wait = '待处理'\n complete = '已完成'\n failed = '执行失败'\n\n choices = (\n (close, close),\n (revoke, revoke),\n (reject, reject),\n (wait, wait),\n (complete, complete),\n (failed, failed)\n )\n\n wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',\n help_text='前端不需要传值')\n topic = models.CharField(max_length=200, verbose_name='工单标题')\n node = models.CharField(max_length=50, verbose_name='当前节点名')\n status = models.CharField(\n max_length=30, choices=STATUS.choices, verbose_name='工单状态')\n creator = models.ForeignKey(\n UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='发起人')\n template = models.ForeignKey(\n WorkflowTemplateRevisionHistory, verbose_name='模板副本', on_delete=models.PROTECT)\n comment = models.CharField(\n max_length=200, null=True, blank=True, verbose_name='备注')\n extra = models.JSONField(default=dict, verbose_name='扩展数据')\n workflow_flag = models.CharField(\n max_length=8, default='normal', verbose_name='工单标记', help_text='normal: 普通, app: 发版应用, sql: SQL工单')\n\n @property\n def cur_node_conf(self):\n for node_conf in self.template.nodes:\n if node_conf['name'] == self.node:\n return node_conf\n\n def generate_wid(self, save=False):\n st = shortuuid.ShortUUID()\n st.set_alphabet(\"0123456789\")\n self.wid = f\"{datetime.now().strftime('%Y%m%d%H%M%S')}{st.random(length=3)}\"\n if save is True:\n self.save()\n\n class Meta:\n db_table = 'workflow_workflow'\n ordering = ['-id']\n\n def __str__(self):\n return 
f'{self.template.id}@{self.template.name}-{self.topic}#{self.wid}-{self.status}'" } ]
import base64 import datetime import os import time import logging from django.core.cache import cache from django.db.models import Q from django.db import transaction from django_q.tasks import async_task, schedule from django_q.models import Schedule from common.custom_format import convert_xml_to_str_with_pipeline from common.utils.AesCipher import AesCipher from common.variables import CI_LATEST_KEY from deploy.documents import BuildJobDocument, DeployJobDocument from deploy.serializers import BuildJobListSerializer, DeployJobSerializer from deploy.rds_transfer import rds_transfer_es from qtasks.tasks_build import JenkinsBuild from dbapp.models import AppInfo from dbapp.model.model_cmdb import DevLanguage, KubernetesDeploy from common.ext_fun import get_datadict, get_redis_data, harbor_cli, template_generate from config import PLAYBOOK_PATH from dbapp.model.model_deploy import BuildJob, DeployJob, PublishOrder from dbapp.model.model_ucenter import DataDict, UserProfile from dbapp.model.model_workflow import Workflow
11866
logger = logging.getLogger(__name__) @transaction.atomic def app_build_handle(request_data, appinfo_obj: AppInfo, user: UserProfile): """ 应用构建 """ cipher = AesCipher('sdevops-platform') commit_tag = request_data.get('commit_tag', None) commits = request_data.get('commits', '') modules = request_data.get('modules', 'dist') custom_tag = request_data.get('custom_tag', None) # {0: 构建, 1: 构建发布} is_deploy = request_data.get('is_deploy', False) language = DevLanguage.objects.get(name=appinfo_obj.app.language) OPS_URL = get_redis_data('platform')['url'] jenkins = get_redis_data('cicd-jenkins') category = appinfo_obj.app.category namespace = appinfo_obj.namespace job_name = appinfo_obj.jenkins_jobname forward = 'no' opshost = '' # 定义harbor空配置 harbor_config = {} build_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') JENKINS_CONFIG = get_redis_data('cicd-jenkins')
logger = logging.getLogger(__name__) @transaction.atomic def app_build_handle(request_data, appinfo_obj: AppInfo, user: UserProfile): """ 应用构建 """ cipher = AesCipher('sdevops-platform') commit_tag = request_data.get('commit_tag', None) commits = request_data.get('commits', '') modules = request_data.get('modules', 'dist') custom_tag = request_data.get('custom_tag', None) # {0: 构建, 1: 构建发布} is_deploy = request_data.get('is_deploy', False) language = DevLanguage.objects.get(name=appinfo_obj.app.language) OPS_URL = get_redis_data('platform')['url'] jenkins = get_redis_data('cicd-jenkins') category = appinfo_obj.app.category namespace = appinfo_obj.namespace job_name = appinfo_obj.jenkins_jobname forward = 'no' opshost = '' # 定义harbor空配置 harbor_config = {} build_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') JENKINS_CONFIG = get_redis_data('cicd-jenkins')
jbuild = JenkinsBuild(JENKINS_CONFIG['url'], username=JENKINS_CONFIG['user'],
3
2023-12-13 03:09:32+00:00
16k
liujin112/PortraitDiffusion
app.py
[ { "identifier": "AttentionBase", "path": "utils/masactrl_utils.py", "snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n # after step\n self.after_step()\n return out\n\n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = torch.einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)\n return out\n\n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0" }, { "identifier": "regiter_attention_editor_diffusers", "path": "utils/masactrl_utils.py", "snippet": "def regiter_attention_editor_diffusers(model, editor: AttentionBase):\n \"\"\"\n Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]\n \"\"\"\n def ca_forward(self, place_in_unet):\n def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):\n \"\"\"\n The attention is similar to the original implementation of LDM CrossAttention class\n except adding some modifications on the attention\n \"\"\"\n if encoder_hidden_states is not None:\n context = encoder_hidden_states\n if attention_mask is not None:\n mask = attention_mask\n\n to_out = self.to_out\n if isinstance(to_out, nn.modules.container.ModuleList):\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n h = self.heads\n q = self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else x\n k = self.to_k(context)\n v = self.to_v(context)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if mask is not None:\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim=-1)\n # the only difference\n out = editor(\n q, k, v, sim, attn, is_cross, place_in_unet,\n self.heads, scale=self.scale)\n\n return to_out(out)\n\n return forward\n\n def register_editor(net, count, place_in_unet):\n for name, subnet in net.named_children():\n if net.__class__.__name__ == 'Attention': # spatial Transformer layer\n net.forward = ca_forward(net, place_in_unet)\n return count + 1\n elif hasattr(net, 'children'):\n count = register_editor(subnet, count, place_in_unet)\n return count\n\n cross_att_count = 0\n for net_name, net in model.unet.named_children():\n if \"down\" in net_name:\n cross_att_count += register_editor(net, 0, \"down\")\n elif \"mid\" in net_name:\n cross_att_count += register_editor(net, 0, \"mid\")\n elif \"up\" in net_name:\n cross_att_count += register_editor(net, 0, \"up\")\n editor.num_att_layers = cross_att_count" }, { "identifier": "register_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_upblock2d(model):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in upblock2d, hidden states shape: {hidden_states.shape}\")\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_crossattn_upblock2d(model):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, 
return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_free_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in free upblock2d, hidden states shape: {hidden_states.shape}\")\n \n if self.source_mask is not None:\n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), 
hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "register_free_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_crossattn_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n \n if self.source_mask is not None:\n \n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # print(f\"source mask is not none, {spatial_mask_source_b1.shape} with min {spatial_mask_source_b1.min()}\", )\n \n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in free crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n \n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n 
None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n # hidden_states = attn(\n # hidden_states,\n # encoder_hidden_states=encoder_hidden_states,\n # cross_attention_kwargs=cross_attention_kwargs,\n # encoder_attention_mask=encoder_attention_mask,\n # return_dict=False,\n # )[0]\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "MaskPromptedStyleAttentionControl", "path": "utils/style_attn_control.py", "snippet": "class MaskPromptedStyleAttentionControl(AttentionBase):\n def __init__(self, start_step=4, start_layer=10, style_attn_step=35, layer_idx=None, step_idx=None, total_steps=50, style_guidance=0.1, \n only_masked_region=False, guidance=0.0, \n style_mask=None, source_mask=None, de_bug=False):\n \"\"\"\n MaskPromptedSAC\n Args:\n start_step: the step to start mutual self-attention control\n start_layer: the layer to start mutual self-attention control\n layer_idx: list of the layers to apply mutual self-attention control\n step_idx: list the steps to apply mutual self-attention control\n total_steps: the total number of steps\n thres: the thereshold for mask thresholding\n ref_token_idx: the token index list for cross-attention map aggregation\n cur_token_idx: the token index list for cross-attention map aggregation\n mask_save_dir: the path to save the mask image\n \"\"\"\n\n super().__init__()\n self.total_steps = total_steps\n self.total_layers = 16\n self.start_step = start_step\n self.start_layer = start_layer\n self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, self.total_layers))\n self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))\n print(\"using MaskPromptStyleAttentionControl\")\n print(\"MaskedSAC at denoising steps: \", self.step_idx)\n print(\"MaskedSAC at U-Net layers: \", self.layer_idx)\n \n self.de_bug = de_bug\n self.style_guidance = style_guidance\n self.only_masked_region = only_masked_region\n self.style_attn_step = style_attn_step\n self.self_attns = []\n self.cross_attns = []\n self.guidance = guidance\n self.style_mask = style_mask\n self.source_mask = source_mask\n\n\n def after_step(self):\n self.self_attns = []\n self.cross_attns = []\n\n def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n \n if q_mask is not None:\n sim = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n if k_mask is not None:\n sim = 
sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n attn = sim.softmax(-1) if attn is None else attn\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def attn_batch_fg_bg(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n if q_mask is not None:\n sim_fg = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(q_mask.unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n if k_mask is not None:\n sim_fg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n sim = torch.cat([sim_fg, sim_bg])\n attn = sim.softmax(-1)\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n\n \"\"\"\n Attention forward function\n \"\"\"\n \n if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx:\n return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n\n B = q.shape[0] // num_heads // 2\n H = W = int(np.sqrt(q.shape[1]))\n \n if self.style_mask is not None and self.source_mask is not None:\n #mask = self.aggregate_cross_attn_map(idx=self.cur_token_idx) # (4, H, W)\n heigh, width = self.style_mask.shape[-2:]\n mask_style = self.style_mask# (H, W)\n mask_source = self.source_mask# (H, W)\n scale = int(np.sqrt(heigh * width / q.shape[1]))\n # res = int(np.sqrt(q.shape[1]))\n spatial_mask_source = F.interpolate(mask_source, (heigh//scale, width//scale)).reshape(-1, 1)\n spatial_mask_style = F.interpolate(mask_style, (heigh//scale, width//scale)).reshape(-1, 1)\n \n else:\n spatial_mask_source=None\n spatial_mask_style=None\n\n if spatial_mask_style is None or spatial_mask_source is None:\n \n out_s,out_c,out_t = self.style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n \n else:\n if self.only_masked_region:\n out_s,out_c,out_t = self.mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n else:\n out_s,out_c,out_t = self.separate_mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n\n out = torch.cat([out_s,out_c,out_t],dim=0) \n return out\n \n\n def style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n qs, qc, qt = q.chunk(3)\n\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[:num_heads], v[:num_heads], 
sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n\n if self.cur_step < self.style_attn_step:\n out_t = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n else:\n out_t = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c + (out_t - out_c) * self.style_guidance\n return out_s,out_c,out_t\n\n def mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n qs, qc, qt = q.chunk(3)\n \n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], attn[num_heads: 2*num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n out_c_new = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n\n if self.cur_step < self.style_attn_step:\n out_t = out_c #self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n else:\n out_t_fg = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n \n out_t = out_t * spatial_mask_source + out_c * (1 - spatial_mask_source)\n\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n # print(torch.sum(out_t* (1 - spatial_mask_source) - out_c * (1 - spatial_mask_source)))\n return out_s,out_c,out_t\n\n def separate_mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n # To prevent query confusion, render fg and bg according to mask.\n qs, qc, qt = q.chunk(3)\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.cur_step < self.style_attn_step: \n \n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg,out_c_bg = out_c.chunk(2)\n out_t = out_c_fg * spatial_mask_source + out_c_bg * (1 - spatial_mask_source)\n\n else:\n out_t = self.attn_batch_fg_bg(qt, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_t_fg,out_t_bg = out_t.chunk(2)\n 
out_c_fg,out_c_bg = out_c.chunk(2)\n if self.style_guidance>=0:\n out_t_fg = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n out_t_bg = out_c_bg + (out_t_bg - out_c_bg) * self.style_guidance \n out_t = out_t_fg * spatial_mask_source + out_t_bg * (1 - spatial_mask_source)\n \n return out_s,out_t,out_t" }, { "identifier": "MasaCtrlPipeline", "path": "utils/pipeline.py", "snippet": "class MasaCtrlPipeline(StableDiffusionPipeline):\n\n def next_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta: float=0.0,\n verbose=False,\n ):\n \"\"\"\n predict the sampe the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = self.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def __call__(\n self,\n prompt,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n latents=None,\n unconditioning=None,\n neg_prompt=None,\n ref_intermediate_latents=None,\n return_intermediates=False,\n lcm_lora=False,\n de_bug=False,\n **kwds):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if isinstance(prompt, list):\n batch_size = len(prompt)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n 
# text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # if kwds.get(\"dir\"):\n # dir = text_embeddings[-2] - text_embeddings[-1]\n # u, s, v = torch.pca_lowrank(dir.transpose(-1, -2), q=1, center=True)\n # text_embeddings[-1] = text_embeddings[-1] + kwds.get(\"dir\") * v\n # print(u.shape)\n # print(v.shape)\n\n # define initial latents\n latents_shape = (batch_size, self.unet.config.in_channels, height//8, width//8)\n if latents is None:\n latents = torch.randn(latents_shape, device=DEVICE)\n else:\n assert latents.shape == latents_shape, f\"The shape of input latent tensor {latents.shape} should equal to predefined one.\"\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n # uc_text = \"ugly, tiling, poorly drawn hands, poorly drawn feet, body out of frame, cut off, low contrast, underexposed, distorted face\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n # unconditional_input.input_ids = unconditional_input.input_ids[:, 1:]\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # iterative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n # print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n latents_list = [latents]\n pred_x0_list = [latents]\n if de_bug:\n import pdb;pdb.set_trace()\n for i, t in enumerate(tqdm(self.scheduler.timesteps, desc=\"DDIM Sampler\")):\n if ref_intermediate_latents is not None:\n # note that the batch_size >= 2\n latents_ref = ref_intermediate_latents[-1 - i]\n _, latents_cur = latents.chunk(2)\n latents = torch.cat([latents_ref, latents_cur])\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) \n # predict tghe noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n if lcm_lora:\n latents, pred_x0 = self.scheduler.step(noise_pred, t, latents, return_dict=False)\n else:\n latents, pred_x0 = self.step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n image = self.latent2image(latents, return_type=\"pt\")\n if return_intermediates:\n pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n latents_list = [self.latent2image(img, return_type=\"pt\") for img in latents_list]\n return image, pred_x0_list, latents_list\n return image\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n return_intermediates=False,\n **kwds):\n \"\"\"\n invert a real image 
into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n start_latents = latents\n # print(latents)\n # exit()\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.next_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n if return_intermediates:\n # return the intermediate laters during inversion\n # pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n return latents, latents_list\n return latents, start_latents" } ]
import os import torch import random import numpy as np import gradio as gr import torch.nn.functional as F from glob import glob from datetime import datetime from diffusers import StableDiffusionPipeline from diffusers import DDIMScheduler, LCMScheduler from PIL import Image,ImageDraw from utils.masactrl_utils import (AttentionBase, regiter_attention_editor_diffusers) from utils.free_lunch_utils import register_upblock2d,register_crossattn_upblock2d,register_free_upblock2d, register_free_crossattn_upblock2d from utils.style_attn_control import MaskPromptedStyleAttentionControl from utils.pipeline import MasaCtrlPipeline from torchvision.utils import save_image from segment_anything import sam_model_registry, SamPredictor
12,314
self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, 
guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask) register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask) else: register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=None) register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=None) else: print(f'++++++++++++++++++ Run without FreeU ++++++++++++++++') # model.disable_freeu()
css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ class GlobalText: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.personalized_model_dir = './models/Stable-diffusion' self.lora_model_dir = './models/Lora' self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S")) self.savedir_sample = os.path.join(self.savedir, "sample") self.savedir_mask = os.path.join(self.savedir, "mask") self.stable_diffusion_list = ["runwayml/stable-diffusion-v1-5", "latent-consistency/lcm-lora-sdv1-5"] self.personalized_model_list = [] self.lora_model_list = [] # config models self.tokenizer = None self.text_encoder = None self.vae = None self.unet = None self.pipeline = None self.lora_loaded = None self.lcm_lora_loaded = False self.personal_model_loaded = None self.sam_predictor = None self.lora_model_state_dict = {} self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # self.refresh_stable_diffusion() self.refresh_personalized_model() self.reset_start_code() def load_base_pipeline(self, model_path): print(f'loading {model_path} model') scheduler = DDIMScheduler.from_pretrained(model_path,subfolder="scheduler") self.pipeline = MasaCtrlPipeline.from_pretrained(model_path, scheduler=scheduler).to(self.device) def refresh_stable_diffusion(self): self.load_base_pipeline(self.stable_diffusion_list[0]) self.lora_loaded = None self.personal_model_loaded = None self.lcm_lora_loaded = False return self.stable_diffusion_list[0] def refresh_personalized_model(self): personalized_model_list = glob(os.path.join(self.personalized_model_dir, "**/*.safetensors"), recursive=True) self.personalized_model_list = {os.path.basename(file): file for file in personalized_model_list} lora_model_list = glob(os.path.join(self.lora_model_dir, "**/*.safetensors"), recursive=True) self.lora_model_list = {os.path.basename(file): file for file in lora_model_list} def update_stable_diffusion(self, stable_diffusion_dropdown): if stable_diffusion_dropdown == 'latent-consistency/lcm-lora-sdv1-5': self.load_lcm_lora() else: self.load_base_pipeline(stable_diffusion_dropdown) self.lora_loaded = None self.personal_model_loaded = None return gr.Dropdown() def update_base_model(self, base_model_dropdown): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def 
load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask) register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask) else: register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=None) register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=None) else: print(f'++++++++++++++++++ Run without FreeU ++++++++++++++++') # model.disable_freeu()
register_upblock2d(model)
2
2023-12-06 01:18:39+00:00
16k
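The MasaCtrlPipeline record above performs deterministic DDIM sampling and inversion in its `step`/`next_step` methods. The sketch below restates that eta=0 update as standalone code so the math is easier to follow on its own; the function name `ddim_step`, the toy latents, and the example alpha-bar values are illustrative assumptions, not code from the repository.

import torch

def ddim_step(eps, x_t, alpha_prod_t, alpha_prod_t_prev):
    """Deterministic (eta=0) DDIM update from x_t to the previous latent.

    eps               : noise predicted by the UNet at the current timestep
    alpha_prod_t      : cumulative alpha-bar at the current timestep
    alpha_prod_t_prev : cumulative alpha-bar at the previous timestep
    """
    # Recover the clean-sample estimate x_0 from the noise prediction.
    pred_x0 = (x_t - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t ** 0.5
    # Direction term that re-noises x_0 to the previous noise level.
    pred_dir = (1 - alpha_prod_t_prev) ** 0.5 * eps
    x_prev = alpha_prod_t_prev ** 0.5 * pred_x0 + pred_dir
    return x_prev, pred_x0

# Toy usage with random latents and made-up alpha-bar values (illustration only).
x_t = torch.randn(1, 4, 64, 64)
eps = torch.randn_like(x_t)
x_prev, pred_x0 = ddim_step(eps, x_t, alpha_prod_t=0.5, alpha_prod_t_prev=0.7)
print(x_prev.shape, pred_x0.shape)

Inversion (`next_step` in the record) applies the same formula with the next, noisier timestep's alpha-bar, which is what lets the pipeline walk a real image back to its starting noise.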
AsuradaYuci/TF-CLIP
datasets/make_dataloader_clipreid.py
[ { "identifier": "VideoDataset", "path": "datasets/video_loader_xh.py", "snippet": "class VideoDataset(Dataset):\n \"\"\"Video Person ReID Dataset.\n Note batch data has shape (batch, seq_len, channel, height, width).\n \"\"\"\n sample_methods = ['evenly', 'random', 'dense']\n\n def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):\n self.dataset = dataset\n self.seq_len = seq_len\n self.sample = sample\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self.__get_single_item__(index) for index in indices]\n return self.__get_single_item__(indices)\n\n def __get_single_item__(self, index):\n S = self.seq_len # 4\n img_paths, pid, camid, trackid = self.dataset[index]\n num = len(img_paths) # 27\n \"\"\"rss 操作\"\"\"\n sample_clip = []\n frame_indices = list(range(num))\n if num < S: # 8 = chunk的数目,每个tracklet分成8段,每段随机选一帧\n strip = list(range(num)) + [frame_indices[-1]] * (S - num)\n for s in range(S):\n pool = strip[s * 1:(s + 1) * 1]\n sample_clip.append(list(pool))\n else:\n inter_val = math.ceil(num / S)\n strip = list(range(num)) + [frame_indices[-1]] * (inter_val * S - num)\n for s in range(S):\n pool = strip[inter_val * s:inter_val * (s + 1)]\n sample_clip.append(list(pool))\n\n sample_clip = np.array(sample_clip)\n\n if self.sample == 'random':\n \"\"\"\n Randomly sample seq_len consecutive frames from num frames,\n if num is smaller than seq_len, then replicate items.\n This sampling strategy is used in training phase.\n \"\"\"\n frame_indices = list(range(num))\n rand_end = max(0, len(frame_indices) - self.seq_len - 1)\n begin_index = random.randint(0, rand_end)\n end_index = min(begin_index + self.seq_len, len(frame_indices))\n\n indices = frame_indices[begin_index:end_index]\n\n for index in indices:\n if len(indices) >= self.seq_len:\n break\n indices.append(index)\n indices = np.array(indices)\n imgseq = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB') # 3x224x112\n imgseq.append(img)\n\n seq = [imgseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n flow_tensor = None\n\n return img_tensor, pid, camid\n\n elif self.sample == 'dense':\n \"\"\"\n Sample all frames in a video into a list of clips, each clip contains seq_len frames, batch_size needs to be set to 1.\n This sampling strategy is used in test phase.\n \"\"\"\n cur_index = 0\n frame_indices = list(range(num)) # 27\n indices_list = []\n while num-cur_index > self.seq_len:\n indices_list.append(frame_indices[cur_index:cur_index+self.seq_len])\n cur_index += self.seq_len\n\n last_seq = frame_indices[cur_index:]\n\n for index in last_seq:\n if len(last_seq) >= self.seq_len:\n break\n last_seq.append(index)\n\n indices_list.append(last_seq) # <class 'list'>: [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 24, 25, 24, 25, 24, 25]]\n imgs_list = []\n for indices in indices_list: # <class 'list'>: [0, 1, 2, 3, 4, 5, 6, 7]\n imgs = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB')\n # img = img.unsqueeze(0)\n imgs.append(img)\n\n imgs = [imgs]\n if self.transform is not None:\n imgs = self.transform(imgs)\n imgs = torch.stack(imgs[0], 0) # torch.Size([8, 3, 224, 112])\n imgs_list.append(imgs)\n imgs_tensor = 
torch.stack(imgs_list) # torch.Size([13, 8, 3, 224, 112])\n # flow_tensor = None\n return imgs_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_train':\n idx = np.random.choice(sample_clip.shape[1], sample_clip.shape[0])\n number = sample_clip[np.arange(len(sample_clip)), idx]\n # imgseq = []\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n # flow_tensor = torch.stack(seq[1], dim=0) # seq_len 4x3x224x112\n\n return img_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_test':\n number = sample_clip[:, 0]\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n img_tensor = torch.stack(seq[0], dim=0) # torch.Size([8, 3, 256, 128])\n # flow_tensor = torch.stack(seq[1], dim=0)\n return img_tensor, pid, camid, trackid, \"\"\n else:\n raise KeyError(\"Unknown sample method: {}. Expected one of {}\".format(self.sample, self.sample_methods))" }, { "identifier": "RandomIdentitySampler", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySampler(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 16\n self.num_instances = num_instances # 4\n self.num_pids_per_batch = self.batch_size // self.num_instances # 4\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances # 7532\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids: # 每个Pid选择4个序列\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch: # 选择P个ID\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if 
len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerForSeq", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerForSeq(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 256\n self.num_instances = num_instances # K=4\n self.num_pids_per_batch = self.batch_size // self.num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids) # 625\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids:\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch:\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerWYQ", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerWYQ(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n Args:\n data_source (Dataset): dataset to sample from.\n num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, num_instances=4):\n super(RandomIdentitySampler).__init__()\n self.data_source = data_source\n self.num_instances = num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n def __iter__(self):\n indices = torch.randperm(self.num_identities)\n ret = []\n for i in indices:\n pid = self.pids[i]\n t = self.index_dic[pid]\n replace = False if len(t) >= self.num_instances else True\n t = np.random.choice(t, size=self.num_instances, replace=replace)\n ret.extend(t)\n # print(ret)\n return iter(ret)\n\n def __len__(self):\n return self.num_identities * self.num_instances" }, { "identifier": "SeqTrainPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTrainPreprocessor(object):\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTrainPreprocessor, 
self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n flow_tensor = torch.stack(seq[1], 0)\n\n return img_tensor, flow_tensor, label, camid" }, { "identifier": "SeqTestPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTestPreprocessor(object):\n\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTestPreprocessor, self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n if len(self.root) == 2:\n flow_tensor = torch.stack(seq[1], 0)\n else:\n flow_tensor = None\n\n return img_tensor, flow_tensor, pid, camid" }, { "identifier": "Mars", "path": "datasets/set/mars.py", "snippet": "class Mars(object):\n root = '/18640539002/dataset_cc/Mars/'\n train_name_path = osp.join(root, 'info/train_name.txt')\n test_name_path = osp.join(root, 'info/test_name.txt')\n track_train_info_path = osp.join(root, 'info/tracks_train_info.mat')\n track_test_info_path = osp.join(root, 'info/tracks_test_info.mat')\n query_IDX_path = osp.join(root, 'info/query_IDX.mat')\n split_train_json_path = osp.join(root, 'split_train.json')\n split_query_json_path = osp.join(root, 'split_query.json')\n split_gallery_json_path = osp.join(root, 'split_gallery.json')\n \n def __init__(self, root= '../data/Mars/', min_seq_len=0):\n self._check_before_run()\n\n train_names = self._get_names(self.train_name_path) # <class 'list'>: <Len: 509914> '0001C1T0001F001.jpg'\n test_names = self._get_names(self.test_name_path) # 
<class 'list'>: <Len: 681089> '00-1C1T0001F001.jpg'\n track_train = loadmat(self.track_train_info_path)[\n 'track_train_info'] # numpy.ndarray (8298, 4) [[1 16 1 1],[17 95 1 1] ...]\n track_test = loadmat(self.track_test_info_path)[\n 'track_test_info'] # numpy.ndarray (12180, 4) [[1 24 -1 1][25 34 -1 1]]\n \n query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,) [4130, 4138...]\n query_IDX -= 1 # index from 0 [4129,4137....]\n track_query = track_test[query_IDX, :] # 对应行的小段视频信息,[[171610 171649 2 1],[172214 172313 2 2]...]\n \n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX] # gallery = 10200\n track_gallery = track_test[gallery_IDX, :] # <class 'tuple'>: (12180, 4) [[1 24 -1 1][25 34 -1 1]...]\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(train_names, track_train, home_dir='bbox_train', relabel=True,\n min_seq_len=min_seq_len, json_path=self.split_train_json_path)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, query_pid, query_camid = \\\n self._process_gallery_data(test_names, track_query, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_query_json_path,)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, gallery_pid, gallery_camid = \\\n self._process_gallery_data(test_names, track_gallery, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_gallery_json_path)\n\n num_imgs_per_tracklet = num_train_imgs + num_query_imgs + num_gallery_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_query_pids\n num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets\n\n print(\"=> MARS loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = num_query_imgs\n\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = num_gallery_imgs\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n \n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise 
RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n \n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n names.append(new_line)\n return names\n \n def _process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n print(\"=> Automatically generating split (might take a while for the first time, have a coffe)\")\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list)\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n \n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n \n cams += [int(camid)]\n \n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n # print(img_paths)\n \n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' trackid)\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n\n num_tracklets = len(tracklets) # 8298\n\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'num_cams' : num_cams,\n 'num_tracks' : 1\n }\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1\n \n def _process_gallery_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['pids'], split['camid']\n\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list) # 626 622\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n gallery_pid = []\n gallery_camid = []\n\n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n\n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' )\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n gallery_pid.append(int(pid))\n gallery_camid.append(int(camid))\n num_tracklets = len(tracklets) # 8298\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'pids': gallery_pid,\n 'camid': gallery_camid,\n }\n write_json(split_dict, json_path)\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, gallery_pid, gallery_camid" }, { "identifier": "iLIDSVIDSEQUENCE", "path": "datasets/set/ilidsvidsequence.py", "snippet": "class iLIDSVIDSEQUENCE(Datasequence):\n\n def __init__(self, root, split_id=0, seq_len=12, seq_srd=6, num_val=1, download=False):\n super(iLIDSVIDSEQUENCE, self).__init__(root, split_id=split_id)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n self.imgextract()\n # --> load完后就有了train,val,和trainval,实际上最开始只有trainval,我们按照num_val\n self.load(seq_len, seq_srd, num_val)\n self.num_train_cams = 2\n self.num_train_vids = 1\n\n self.query, query_pid, query_camid, query_num = self._pluckseq_cam(self.identities, self.split['query'],\n seq_len, seq_srd, 0)\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = query_num\n\n self.gallery, gallery_pid, gallery_camid, gallery_num = self._pluckseq_cam(self.identities,\n self.split['gallery'],\n seq_len, seq_srd, 1)\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = gallery_num\n\n @property\n def other_dir(self):\n return osp.join(self.root, 'others')\n\n def download(self):\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n raw_dir = osp.join(self.root, 'raw')\n mkdir_if_missing(raw_dir)\n\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if osp.isfile(fpath1) and osp.isfile(fpath2):\n print(\"Using the download file:\" + fpath1 + \" \" + fpath2)\n else:\n print(\"Please firstly download the files\")\n raise RuntimeError(\"Downloaded file missing!\")\n\n def imgextract(self):\n\n raw_dir = osp.join(self.root, 'raw')\n exdir1 = osp.join(raw_dir, datasetname)\n exdir2 = osp.join(raw_dir, flowname)\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if not osp.isdir(exdir1):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath1)\n mkdir_if_missing(exdir1)\n 
os.chdir(exdir1)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n if not osp.isdir(exdir2):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath2)\n mkdir_if_missing(exdir2)\n os.chdir(exdir2)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n # reorganzing the dataset\n # Format\n\n temp_images_dir = osp.join(self.root, 'temp_images')\n mkdir_if_missing(temp_images_dir)\n\n temp_others_dir = osp.join(self.root, 'temp_others')\n mkdir_if_missing(temp_others_dir)\n\n images_dir = osp.join(self.root, 'images')\n mkdir_if_missing(images_dir)\n\n others_dir = osp.join(self.root, 'others')\n mkdir_if_missing(others_dir)\n\n fpaths1 = sorted(glob(osp.join(exdir1, 'i-LIDS-VID/sequences', '*/*/*.png')))\n fpaths2 = sorted(glob(osp.join(exdir2, flowname, '*/*/*.png')))\n\n identities_imgraw = [[[] for _ in range(2)] for _ in range(319)]\n identities_otherraw = [[[] for _ in range(2)] for _ in range(319)]\n\n # image information\n for fpath in fpaths1:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_imgraw[pid - 1][cam - 1])))\n identities_imgraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))\n\n identities_temp = [x for x in identities_imgraw if x != [[], []]]\n identities_images = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_images[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_images[pid][cam][img] = fname\n shutil.copy(osp.join(temp_images_dir, temp_fname), osp.join(images_dir, fname))\n\n shutil.rmtree(temp_images_dir)\n\n # flow information\n\n for fpath in fpaths2:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_otherraw[pid - 1][cam - 1])))\n identities_otherraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))\n\n identities_temp = [x for x in identities_otherraw if x != [[], []]]\n identities_others = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_others[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_others[pid][cam][img] = fname\n shutil.copy(osp.join(temp_others_dir, temp_fname), osp.join(others_dir, fname))\n\n shutil.rmtree(temp_others_dir)\n\n meta = {'name': 'iLIDS-sequence', 'shot': 'sequence', 'num_cameras': 2,\n 'identities': identities_images}\n\n write_json(meta, osp.join(self.root, 'meta.json'))\n\n # Consider fixed training and testing split\n splitmat_name = osp.join(exdir1, 'train-test people splits', 'train_test_splits_ilidsvid.mat')\n data = sio.loadmat(splitmat_name)\n person_list = data['ls_set']\n num = len(identities_images)\n splits = []\n\n for i in range(10):\n pids = (person_list[i] - 1).tolist()\n trainval_pids = sorted(pids[:num // 2])\n test_pids = sorted(pids[num // 2:])\n split = {'trainval': trainval_pids,\n 'query': test_pids,\n 'gallery': test_pids}\n splits.append(split)\n write_json(splits, osp.join(self.root, 
'splits.json'))\n\n def _pluckseq_cam(self, identities, indices, seq_len, seq_str, camid):\n # --> query和gallery与 trainval不同的是\n # --> trainval是用来训练的,所以怎么处理都行\n # --> query和gallery是来模拟实际场景的,所以不能用那种重复采样的方法扩充两个数据集\n # --> 另外要求是不同镜头下的,所以加一个camid\n\n ret = []\n per_id = []\n cam_id = []\n tra_num = []\n\n for index, pid in enumerate(indices):\n pid_images = identities[pid]\n cam_images = pid_images[camid]\n seqall = len(cam_images)\n seq_inds = [(start_ind, start_ind + seq_len) for start_ind in range(0, seqall - seq_len, seq_str)]\n if not seq_inds:\n seq_inds = [(0, seqall)]\n for seq_ind in seq_inds:\n ret.append((seq_ind[0], seq_ind[1], pid, index, camid))\n per_id.append(pid)\n cam_id.append(camid)\n tra_num.append(len(seq_inds))\n return ret, per_id, cam_id, tra_num" }, { "identifier": "LSVID", "path": "datasets/set/lsvid.py", "snippet": "class LSVID(object):\n\n def __init__(self, root=None, sampling_step=48, *args, **kwargs):\n self._root = root\n self.train_name_path = osp.join(self._root, 'info/list_sequence/list_seq_train.txt')\n self.test_name_path = osp.join(self._root, 'info/list_sequence/list_seq_test.txt')\n self.query_IDX_path = osp.join(self._root, 'info/data/info_test.mat')\n\n self._check_before_run()\n\n # prepare meta data\n track_train = self._get_names(self.train_name_path)\n track_test = self._get_names(self.test_name_path)\n\n track_train = np.array(track_train)\n track_test = np.array(track_test)\n\n query_IDX = h5py.File(self.query_IDX_path, mode='r')['query'][0,:] # numpy.ndarray (1980,)\n query_IDX = np.array(query_IDX, dtype=int)\n\n query_IDX -= 1 # index from 0\n track_query = track_test[query_IDX, :]\n\n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]\n track_gallery = track_test[gallery_IDX, :]\n\n self.split_train_dense_json_path = osp.join(self._root,'split_train_dense_{}.json'.format(sampling_step))\n self.split_train_json_path = osp.join(self._root, 'split_train.json')\n self.split_query_json_path = osp.join(self._root, 'split_query.json')\n self.split_gallery_json_path = osp.join(self._root, 'split_gallery.json')\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(track_train, json_path=self.split_train_json_path, relabel=True)\n\n train_dense, num_train_tracklets_dense, num_train_pids_dense, num_train_imgs_dense, _, _ = \\\n self._process_data(track_train, json_path=self.split_train_dense_json_path, relabel=True, sampling_step=sampling_step)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, _, _ = \\\n self._process_data(track_query, json_path=self.split_query_json_path, relabel=False)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, _, _ = \\\n self._process_data(track_gallery, json_path=self.split_gallery_json_path, relabel=False)\n\n num_imgs_per_tracklet = num_train_imgs + num_gallery_imgs + num_query_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_gallery_pids\n num_total_tracklets = num_train_tracklets + num_gallery_tracklets + num_query_tracklets\n\n print(\"=> LS-VID loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n if sampling_step != 0:\n print(\" train_d | {:5d} | 
{:8d}\".format(num_train_pids_dense, num_train_tracklets_dense))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n if sampling_step != 0:\n self.train = train_dense\n else:\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self._root):\n raise RuntimeError(\"'{}' is not available\".format(self._root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n\n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n basepath, pid = new_line.split(' ')\n names.append([basepath, int(pid)])\n return names\n\n def _process_data(self,\n meta_data,\n relabel=False,\n json_path=None,\n sampling_step=0):\n if osp.exists(json_path):\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n\n num_tracklets = meta_data.shape[0]\n pid_list = list(set(meta_data[:, 1].tolist()))\n num_pids = len(pid_list)\n\n if relabel: pid2label = {int(pid): label for label, pid in enumerate(pid_list)}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n\n for tracklet_idx in range(num_tracklets):\n tracklet_path = osp.join(self._root, meta_data[tracklet_idx, 0]) + '*'\n img_paths = glob.glob(tracklet_path) # avoid .DS_Store\n img_paths.sort()\n pid = int(meta_data[tracklet_idx, 1])\n _, _, camid, _ = osp.basename(img_paths[0]).split('_')[:4]\n cams += [int(camid)]\n camid = int(camid)\n\n if pid == -1: continue # junk images are just ignored\n assert 1 <= camid <= 15\n if relabel: pid = pid2label[pid]\n camid -= 1 # index starts from 0\n \n if sampling_step == 0:\n tracklets.append((img_paths, pid, camid, 1))\n else:\n num_sampling = len(img_paths) // sampling_step\n for idx in range(num_sampling):\n if idx == num_sampling - 1:\n tracklets.append((img_paths[idx * sampling_step:], pid, camid, 1))\n else:\n tracklets.append((img_paths[idx * sampling_step: (idx + 1) * sampling_step], pid, camid, 1))\n num_imgs_per_tracklet.append(len(img_paths))\n\n num_tracklets = len(tracklets)\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {'tracklets': tracklets, 'num_tracklets': num_tracklets, 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet, 'num_cams' : num_cams, 'num_tracks' : 1}\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1" } ]
import torch import utils.spatial_transforms as ST import utils.temporal_transforms as TT import utils.transforms as T import utils.seqtransforms as SeqT from torch.utils.data import DataLoader from datasets.video_loader_xh import VideoDataset from datasets.samplers import RandomIdentitySampler, RandomIdentitySamplerForSeq, RandomIdentitySamplerWYQ from datasets.seqpreprocessor import SeqTrainPreprocessor, SeqTestPreprocessor from datasets.set.mars import Mars from datasets.set.ilidsvidsequence import iLIDSVIDSEQUENCE from datasets.set.lsvid import LSVID
12,722
# from torchvision.transforms import InterpolationMode # import torchvision.transforms as T __factory = { 'mars': Mars, 'ilidsvidsequence': iLIDSVIDSEQUENCE, 'lsvid': LSVID } def train_collate_fn(batch): """ # collate_fn这个函数的输入就是一个list,list的长度是一个batch size,list中的每个元素都是__getitem__得到的结果 """ imgs, pids, camids, viewids, _ = zip(*batch) pids = torch.tensor(pids, dtype=torch.int64) viewids = torch.tensor(viewids, dtype=torch.int64) camids = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, viewids, def val_collate_fn(batch): imgs, pids, camids, viewids, img_paths = zip(*batch) viewids = torch.tensor(viewids, dtype=torch.int64) camids_batch = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths def train_collate_fn_seq(batch): """ # collate_fn这个函数的输入就是一个list,list的长度是一个batch size,list中的每个元素都是__getitem__得到的结果 """ imgs, flows, pids, camids = zip(*batch) viewids = 1 pids = torch.tensor(pids, dtype=torch.int64) viewids = torch.tensor(viewids, dtype=torch.int64) camids = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, viewids, def val_collate_fn_seq(batch): imgs, flows, pids, camids = zip(*batch) viewids = 1 img_paths = None viewids = torch.tensor(viewids, dtype=torch.int64) camids_batch = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths def make_dataloader(cfg): split_id = cfg.DATASETS.SPLIT seq_srd = cfg.INPUT.SEQ_SRD seq_len = cfg.INPUT.SEQ_LEN num_workers = cfg.DATALOADER.NUM_WORKERS if cfg.DATASETS.NAMES != 'mars' and cfg.DATASETS.NAMES != 'duke' and cfg.DATASETS.NAMES != 'lsvid': dataset = __factory[cfg.DATASETS.NAMES](root=cfg.DATASETS.ROOT_DIR, split_id=split_id, seq_len=seq_len, seq_srd=seq_srd, num_val=1) num_classes = dataset.num_trainval_ids cam_num = dataset.num_train_cams view_num = dataset.num_train_vids train_set = SeqTrainPreprocessor(dataset.trainval, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.RandomHorizontalFlip(), SeqT.RandomSizedEarser(), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) train_set_normal = SeqTrainPreprocessor(dataset.trainval, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) val_set = SeqTestPreprocessor(dataset.query + dataset.gallery, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) train_loader_stage2 = DataLoader( train_set,
# from torchvision.transforms import InterpolationMode # import torchvision.transforms as T __factory = { 'mars': Mars, 'ilidsvidsequence': iLIDSVIDSEQUENCE, 'lsvid': LSVID } def train_collate_fn(batch): """ # collate_fn这个函数的输入就是一个list,list的长度是一个batch size,list中的每个元素都是__getitem__得到的结果 """ imgs, pids, camids, viewids, _ = zip(*batch) pids = torch.tensor(pids, dtype=torch.int64) viewids = torch.tensor(viewids, dtype=torch.int64) camids = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, viewids, def val_collate_fn(batch): imgs, pids, camids, viewids, img_paths = zip(*batch) viewids = torch.tensor(viewids, dtype=torch.int64) camids_batch = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths def train_collate_fn_seq(batch): """ # collate_fn这个函数的输入就是一个list,list的长度是一个batch size,list中的每个元素都是__getitem__得到的结果 """ imgs, flows, pids, camids = zip(*batch) viewids = 1 pids = torch.tensor(pids, dtype=torch.int64) viewids = torch.tensor(viewids, dtype=torch.int64) camids = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, viewids, def val_collate_fn_seq(batch): imgs, flows, pids, camids = zip(*batch) viewids = 1 img_paths = None viewids = torch.tensor(viewids, dtype=torch.int64) camids_batch = torch.tensor(camids, dtype=torch.int64) return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths def make_dataloader(cfg): split_id = cfg.DATASETS.SPLIT seq_srd = cfg.INPUT.SEQ_SRD seq_len = cfg.INPUT.SEQ_LEN num_workers = cfg.DATALOADER.NUM_WORKERS if cfg.DATASETS.NAMES != 'mars' and cfg.DATASETS.NAMES != 'duke' and cfg.DATASETS.NAMES != 'lsvid': dataset = __factory[cfg.DATASETS.NAMES](root=cfg.DATASETS.ROOT_DIR, split_id=split_id, seq_len=seq_len, seq_srd=seq_srd, num_val=1) num_classes = dataset.num_trainval_ids cam_num = dataset.num_train_cams view_num = dataset.num_train_vids train_set = SeqTrainPreprocessor(dataset.trainval, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.RandomHorizontalFlip(), SeqT.RandomSizedEarser(), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) train_set_normal = SeqTrainPreprocessor(dataset.trainval, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) val_set = SeqTestPreprocessor(dataset.query + dataset.gallery, dataset, seq_len, transform=SeqT.Compose([SeqT.RectScale(256, 128), SeqT.ToTensor(), SeqT.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])) train_loader_stage2 = DataLoader( train_set,
sampler=RandomIdentitySamplerForSeq(dataset.trainval, batch_size=cfg.SOLVER.STAGE2.IMS_PER_BATCH,
2
2023-12-11 04:03:46+00:00
16k
MarilynKeller/aitviewer-skel
aitviewer/renderables/markers.py
[ { "identifier": "Spheres", "path": "aitviewer/renderables/spheres.py", "snippet": "class Spheres(Node):\n \"\"\"Render some simple spheres.\"\"\"\n\n def __init__(\n self,\n positions,\n radius=0.01,\n color=(0.0, 0.0, 1.0, 1.0),\n rings=16,\n sectors=32,\n icon=\"\\u008d\",\n cast_shadow=False,\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param positions: A numpy array of shape (F, N, 3) or (N, 3) containing N sphere positions for F time steps.\n :param radius: Radius of the spheres.\n :param color: Color of the spheres.\n :param rings: Longitudinal resolution.\n :param sectors: Latitudinal resolution.\n \"\"\"\n if len(positions.shape) == 2:\n positions = positions[np.newaxis]\n assert len(positions.shape) == 3\n\n # Define a default material in case there is None.\n if isinstance(color, tuple) or len(color.shape) == 1:\n kwargs[\"material\"] = kwargs.get(\"material\", Material(color=color, ambient=0.2))\n self.sphere_colors = kwargs[\"material\"].color\n else:\n assert color.shape[1] == 4 and positions.shape[1] == color.shape[0]\n self.sphere_colors = color\n\n if \"n_frames\" not in kwargs:\n kwargs[\"n_frames\"] = positions.shape[0]\n super().__init__(icon=icon, **kwargs)\n\n self._sphere_positions = positions\n self.radius = radius\n\n self.vertices, self.faces = _create_sphere(radius=1.0, rings=rings, sectors=sectors)\n self.n_vertices = self.vertices.shape[0]\n self.n_spheres = self.sphere_positions.shape[1]\n\n self.draw_edges = False\n self._need_upload = True\n\n # Render passes.\n self.outline = True\n self.fragmap = True\n self.depth_prepass = True\n self.cast_shadow = cast_shadow\n\n @property\n def bounds(self):\n bounds = self.get_bounds(self.sphere_positions)\n bounds[:, 0] -= self.radius\n bounds[:, 1] += self.radius\n return bounds\n\n @property\n def current_bounds(self):\n bounds = self.get_bounds(self.current_sphere_positions)\n bounds[:, 0] -= self.radius\n bounds[:, 1] += self.radius\n return bounds\n\n @property\n def vertex_colors(self):\n if len(self._sphere_colors.shape) == 1:\n return np.full((self.n_spheres * self.n_vertices, 4), self._sphere_colors)\n else:\n return np.tile(self._sphere_colors, (self.n_vertices, 1))\n\n def color_one(self, index, color):\n new_colors = np.tile(np.array(self.material.color), (self.n_spheres, 1))\n new_colors[index] = color\n self.sphere_colors = new_colors\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.sphere_colors = color\n self.redraw()\n\n @property\n def sphere_colors(self):\n if len(self._sphere_colors.shape) == 1:\n t = np.tile(np.array(self._sphere_colors), (self.n_spheres, 1))\n return t\n else:\n return self._sphere_colors\n\n @sphere_colors.setter\n def sphere_colors(self, color):\n if isinstance(color, tuple):\n color = np.array(color)\n self._sphere_colors = color\n self.redraw()\n\n @property\n def current_sphere_positions(self):\n idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0\n return self.sphere_positions[idx]\n\n @current_sphere_positions.setter\n def current_sphere_positions(self, positions):\n assert len(positions.shape) == 2\n idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0\n self.sphere_positions[idx] = positions\n self.redraw()\n\n @property\n def sphere_positions(self):\n return self._sphere_positions\n\n @sphere_positions.setter\n def sphere_positions(self, pos):\n if len(pos.shape) == 2:\n pos = pos[np.newaxis]\n self._sphere_positions = pos\n self.n_frames = len(self._sphere_positions)\n self.redraw()\n\n def 
on_frame_update(self):\n self.redraw()\n\n def redraw(self, **kwargs):\n self._need_upload = True\n\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n self.prog = get_sphere_instanced_program()\n\n vs_path = \"sphere_instanced_positions.vs.glsl\"\n self.outline_program = get_outline_program(vs_path)\n self.depth_only_program = get_depth_only_program(vs_path)\n self.fragmap_program = get_fragmap_program(vs_path)\n\n self.vbo_vertices = ctx.buffer(self.vertices.astype(\"f4\").tobytes())\n self.vbo_indices = ctx.buffer(self.faces.astype(\"i4\").tobytes())\n\n self.vbo_instance_position = ctx.buffer(reserve=self.n_spheres * 12)\n self.vbo_instance_color = ctx.buffer(reserve=self.n_spheres * 16)\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_instance_position, \"3f4/i\", \"instance_position\")\n self.vao.buffer(self.vbo_instance_color, \"4f4/i\", \"instance_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n def _upload_buffers(self):\n if not self.is_renderable or not self._need_upload:\n return\n self._need_upload = False\n self.vbo_instance_position.write(self.current_sphere_positions.astype(\"f4\").tobytes())\n if len(self._sphere_colors.shape) > 1:\n self.vbo_instance_color.write(self._sphere_colors.astype(\"f4\").tobytes())\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n\n prog = self.prog\n prog[\"radius\"] = self.radius\n if len(self._sphere_colors.shape) == 1:\n prog[\"use_uniform_color\"] = True\n prog[\"uniform_color\"] = tuple(self._sphere_colors)\n else:\n prog[\"use_uniform_color\"] = False\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n prog[\"clip_control\"].value = (0, 0, 0)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_spheres)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n prog[\"radius\"] = self.radius\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_spheres)\n\n def gui(self, imgui):\n _, self.radius = imgui.drag_float(\"Radius\", self.radius, 0.01, min_value=0.001, max_value=10.0, format=\"%.3f\")\n super().gui(imgui)\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n\n def update_frames(self, positions, frames):\n self.sphere_positions[frames] = positions\n self.redraw()\n\n def add_frames(self, positions):\n if len(positions.shape) == 2:\n positions = positions[np.newaxis]\n self.sphere_positions = np.append(self.sphere_positions, positions, axis=0)\n\n def remove_frames(self, frames):\n self.sphere_positions = np.delete(self.sphere_positions, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n V = self.vertices.shape[0]\n N = self.sphere_positions.shape[0]\n M = self.n_spheres\n\n vertices = np.empty((N, V * M, 3), np.float32)\n for i in range(N):\n vs = self.vertices[np.newaxis].repeat(M, 0)\n vertices[i] = (vs * self.radius + self.sphere_positions[i].reshape(M, 1, 3)).reshape((-1, 3))\n\n fs = self.faces[np.newaxis].repeat(M, 0).reshape((M, -1))\n offsets = (np.arange(M) * 
V).reshape((M, 1))\n faces = (fs + offsets).reshape((-1, 3))\n\n mesh = usd.add_mesh(stage, usd_path, self.name, vertices, faces, self.get_local_transform())\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n 
f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def 
rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id 
if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "PointClouds", "path": "aitviewer/renderables/point_clouds.py", "snippet": "class PointClouds(Node):\n \"\"\"\n Draw a point clouds man!\n \"\"\"\n\n def __init__(\n self,\n points,\n colors=None,\n point_size=5.0,\n color=(0.0, 0.0, 1.0, 1.0),\n z_up=False,\n icon=\"\\u008c\",\n pickable=True,\n **kwargs,\n ):\n \"\"\"\n A sequence of point clouds. Each point cloud can have a varying number of points.\n :param points: Sequence of points (F, P, 3)\n :param colors: Sequence of Colors (F, C, 4) or None. 
If None, all points are colored according to `color`.\n :param point_size: Initial point size.\n :param color: Default color applied to all points of all frames if `colors` is not provided.\n :param z_up: If true the point cloud rotation matrix is initialized to convert from z-up to y-up data.\n \"\"\"\n assert isinstance(points, list) or isinstance(points, np.ndarray)\n if colors is not None:\n assert len(colors) == len(points)\n\n self.points = points\n super(PointClouds, self).__init__(n_frames=len(self.points), color=color, icon=icon, **kwargs)\n\n self.fragmap = pickable\n\n # Render passes\n self.outline = True\n\n self.colors = colors\n self.point_size = point_size\n self.max_n_points = max([p.shape[0] for p in self.points])\n\n self.vao = VAO(\"points\", mode=moderngl.POINTS)\n\n if z_up and not C.z_up:\n self.rotation = np.matmul(np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]), self.rotation)\n\n @property\n def points(self):\n return self._points\n\n @points.setter\n def points(self, points):\n self._points = points\n self.n_frames = len(points)\n self.max_n_points = max([p.shape[0] for p in self.points])\n\n @property\n def colors(self):\n return self._colors\n\n @colors.setter\n def colors(self, colors):\n # Colors cannot be empty\n if colors is None:\n self._colors = [self.color]\n elif isinstance(colors, tuple) and len(colors) == 4:\n self._colors = [colors]\n elif isinstance(colors, list) or isinstance(colors, np.ndarray):\n assert len(colors) == self.n_frames\n assert colors[0].shape[-1] == 4\n self._colors = colors\n else:\n raise ValueError(\"Invalid colors: {}\".format(colors))\n\n @Node.color.setter\n def color(self, color):\n \"\"\"Update the color of the point cloud.\"\"\"\n # This is a bit ill-defined because point clouds can have per-point colors, in which case we probably do\n # not want to override them with a single uniform color. We disallow this for now. The function is still useful\n # though to change the alpha, even if a point cloud has per-point colors.\n self.material.color = color\n if self.is_renderable:\n single_color = isinstance(self.colors[0], tuple) and len(self.colors[0]) == 4\n if single_color:\n self.colors = tuple(color)\n else:\n # Only update the colors if the alpha changed. 
Take any frame and any point to check if the alpha\n # changed because we always change every frame and every point.\n alpha_changed = abs(color[-1] - self.colors[0][0, -1]) > 0\n if alpha_changed:\n for i in range(self.n_frames):\n self.colors[i][..., -1] = color[-1]\n self.redraw()\n\n @property\n def current_points(self):\n idx = self.current_frame_id if len(self.points) > 1 else 0\n return self.points[idx]\n\n @property\n def current_colors(self):\n if len(self.colors) == 1:\n n_points = self.current_points.shape[0]\n return np.full((n_points, 4), self.colors[0])\n else:\n idx = self.current_frame_id if len(self.colors) > 1 else 0\n return self.colors[idx]\n\n @property\n def bounds(self):\n if len(self.points) == 0:\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n bounds = np.array([[np.inf, np.NINF], [np.inf, np.NINF], [np.inf, np.NINF]])\n for i in range(len(self.points)):\n b = self.get_bounds(self.points[i])\n bounds[:, 0] = np.minimum(bounds[:, 0], b[:, 0])\n bounds[:, 1] = np.maximum(bounds[:, 1], b[:, 1])\n return bounds\n\n @property\n def current_bounds(self):\n return self.get_bounds(self.current_points)\n\n def on_frame_update(self):\n \"\"\"Called whenever a new frame must be displayed.\"\"\"\n super().on_frame_update()\n self.redraw()\n\n def redraw(self, **kwargs):\n \"\"\"Upload the current frame data to the GPU for rendering.\"\"\"\n if not self.is_renderable:\n return\n\n points = self.current_points.astype(\"f4\").tobytes()\n colors = self.current_colors.astype(\"f4\").tobytes()\n\n # Resize the VBOs if necessary. This can happen if new points are set after the `make_renderable` has been\n # called.\n if self.max_n_points * 3 * 4 > self.vbo_points.size:\n self.vbo_points.orphan(self.max_n_points * 3 * 4)\n self.vbo_colors.orphan(self.max_n_points * 4 * 4)\n\n self.vbo_points.write(points)\n self.vbo_colors.write(colors)\n\n def _clear_buffer(self):\n self.vbo_points.clear()\n self.vbo_colors.clear()\n\n # noinspection PyAttributeOutsideInit\n @Node.once\n def make_renderable(self, ctx):\n ctx.point_size = self.point_size\n\n self.prog = get_simple_unlit_program()\n self.outline_program = get_outline_program(\"mesh_positions.vs.glsl\")\n self.fragmap_program = get_fragmap_program(\"mesh_positions.vs.glsl\")\n\n self.vbo_points = ctx.buffer(reserve=self.max_n_points * 3 * 4, dynamic=True)\n self.vbo_colors = ctx.buffer(reserve=self.max_n_points * 4 * 4, dynamic=True)\n self.vbo_points.write(self.current_points.astype(\"f4\").tobytes())\n self.vbo_colors.write(self.current_colors.astype(\"f4\").tobytes())\n self.vao.buffer(self.vbo_points, \"3f\", [\"in_position\"])\n self.vao.buffer(self.vbo_colors, \"4f\", [\"in_color\"])\n\n self.positions_vao = VAO(\"{}:positions\".format(self.unique_name), mode=moderngl.POINTS)\n self.positions_vao.buffer(self.vbo_points, \"3f\", [\"in_position\"])\n\n def render(self, camera, **kwargs):\n self.set_camera_matrices(self.prog, camera, **kwargs)\n # Draw only as many points as we have set in the buffer.\n self.vao.render(self.prog, vertices=len(self.current_points))\n\n def render_positions(self, prog):\n if self.is_renderable:\n self.positions_vao.render(prog, vertices=len(self.current_points))\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n self.positions_vao.release(buffer=False)" }, { "identifier": "mocap", "path": "aitviewer/utils/mocap.py", "snippet": "def clean_CMU_mocap_labels(c3dFile: nimble.biomechanics.C3D):" }, { "identifier": "clean_CMU_mocap_labels", "path": "aitviewer/utils/mocap.py", 
"snippet": "def clean_CMU_mocap_labels(c3dFile: nimble.biomechanics.C3D):\n \"Rename all the labels with the pattern AAAA-XX and replace them by AAAA\"\n\n c3dFile.markers = [name for name in c3dFile.markers if \"-\" not in name]\n\n markerTimesteps = c3dFile.markerTimesteps.copy()\n\n for markers_dict in markerTimesteps:\n markers_dict_clean = markers_dict.copy()\n for key in markers_dict:\n if '-' in key:\n key_clean = key.split('-')[0]\n markers_dict_clean[key_clean] = markers_dict_clean.pop(key)\n markers_dict.clear()\n markers_dict.update(markers_dict_clean)\n\n c3dFile.markerTimesteps = markerTimesteps\n\n\n return c3dFile" } ]
import numpy as np import os import pickle as pkl import tqdm import nimblephysics as nimble from aitviewer.renderables.spheres import Spheres from aitviewer.scene.node import Node from aitviewer.renderables.point_clouds import PointClouds from aitviewer.utils import mocap from aitviewer.utils.mocap import clean_CMU_mocap_labels
11,724
# Code Developed by: # Marilyn Keller, [email protected] # Do not share or distribute without permission of the author class Markers(Node): """ Draw a point clouds man! """ def __init__(self, points, markers_labels, name="Mocap data", colors=None, lengths=None, point_size=5.0, radius = 0.0075, color=(0.0, 0.0, 1.0, 1.0), as_spheres=True, **kwargs): """ A sequence of point clouds. Each point cloud can have a varying number of points. Internally represented as a list of arrays. :param points: Sequence of points (F, P, 3) :param colors: Sequence of Colors (F, C, 4) :param lengths: Length mask for each frame of points denoting the usable part of the array :param point_size: Initial point size """ # self.points = points super(Markers, self).__init__(name, n_frames=points.shape[0], color=color, **kwargs) # Check that the marker labels are sorted # markers_labels_copy = markers_labels.copy() # markers_labels_copy.sort() # assert markers_labels == markers_labels_copy self.markers_labels = markers_labels self.marker_trajectory = points # FxMx3 self.color = color if self.marker_trajectory.shape[1]>200: as_spheres = False print(f"Too many markers ({self.marker_trajectory.shape[1]}). Switching to pointcloud.") #todo fix color bug for mi, marker_name in enumerate(self.markers_labels): if colors is not None: color = tuple(colors[mi]) if as_spheres: markers_seq = Spheres(self.marker_trajectory[:,mi,:][:,np.newaxis,:], color=color, radius = radius, name=marker_name, **kwargs) else:
# Code Developed by: # Marilyn Keller, [email protected] # Do not share or distribute without permission of the author class Markers(Node): """ Draw a point clouds man! """ def __init__(self, points, markers_labels, name="Mocap data", colors=None, lengths=None, point_size=5.0, radius = 0.0075, color=(0.0, 0.0, 1.0, 1.0), as_spheres=True, **kwargs): """ A sequence of point clouds. Each point cloud can have a varying number of points. Internally represented as a list of arrays. :param points: Sequence of points (F, P, 3) :param colors: Sequence of Colors (F, C, 4) :param lengths: Length mask for each frame of points denoting the usable part of the array :param point_size: Initial point size """ # self.points = points super(Markers, self).__init__(name, n_frames=points.shape[0], color=color, **kwargs) # Check that the marker labels are sorted # markers_labels_copy = markers_labels.copy() # markers_labels_copy.sort() # assert markers_labels == markers_labels_copy self.markers_labels = markers_labels self.marker_trajectory = points # FxMx3 self.color = color if self.marker_trajectory.shape[1]>200: as_spheres = False print(f"Too many markers ({self.marker_trajectory.shape[1]}). Switching to pointcloud.") #todo fix color bug for mi, marker_name in enumerate(self.markers_labels): if colors is not None: color = tuple(colors[mi]) if as_spheres: markers_seq = Spheres(self.marker_trajectory[:,mi,:][:,np.newaxis,:], color=color, radius = radius, name=marker_name, **kwargs) else:
markers_seq = PointClouds(self.marker_trajectory[:,mi,:][:,np.newaxis,:], name=marker_name, point_size=point_size, color=color, **kwargs)
2
2023-12-07 16:13:50+00:00
16k
nexB/dejacode
dejacode/urls.py
[ { "identifier": "ComponentViewSet", "path": "component_catalog/api.py", "snippet": "class ComponentViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Component.objects.all()\n serializer_class = ComponentSerializer\n filterset_class = ComponentFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"homepage_url\",\n \"project\",\n )\n search_fields_autocomplete = (\n \"name\",\n \"version\",\n )\n ordering_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"license_expression\",\n \"primary_language\",\n \"project\",\n \"codescan_identifier\",\n \"type\",\n \"configuration_status\",\n \"usage_policy\",\n \"curation_level\",\n \"completion_level\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = ComponentAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"type\",\n \"owner__dataspace\",\n \"configuration_status\",\n )\n .prefetch_related(\n \"licenses__category\",\n \"packages\",\n external_references_prefetch,\n )\n )" }, { "identifier": "PackageViewSet", "path": "component_catalog/api.py", "snippet": "class PackageViewSet(SendAboutFilesMixin, CreateRetrieveUpdateListViewSet):\n queryset = Package.objects.all()\n serializer_class = PackageSerializer\n filterset_class = PackageAPIFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"filename\",\n \"project\",\n )\n search_fields_autocomplete = (\n \"type\",\n \"namespace\",\n \"name\",\n \"version\",\n \"filename\",\n )\n ordering_fields = (\n \"download_url\",\n \"filename\",\n \"size\",\n \"release_date\",\n \"primary_language\",\n \"project\",\n \"copyright\",\n \"license_expression\",\n \"usage_policy\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = PackageAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .prefetch_related(\n \"component_set__owner\",\n \"licenses__category\",\n external_references_prefetch,\n )\n )\n\n @action(detail=True)\n def about(self, request, uuid):\n package = self.get_object()\n return Response({\"about_data\": package.as_about_yaml()})\n\n @action(detail=True)\n def about_files(self, request, uuid):\n package = self.get_object()\n about_files = package.get_about_files()\n filename = self.get_filename(package)\n return self.get_zipped_response(about_files, filename)\n\n download_url_description = (\n \"A single, or list of, Download URL(s).<br><br>\"\n '<b>cURL style</b>: <code>-d \"download_url=url1&download_url=url2\"</code><br><br>'\n '<b>Python</b>: <code>data = {\"download_url\": [\"url1\", \"url2\"]}</code>'\n )\n\n add_action_schema = AutoSchema(\n manual_fields=[\n coreapi.Field(\n \"download_url\",\n required=True,\n location=\"body\",\n schema=coreschema.String(description=download_url_description),\n ),\n ]\n )\n\n @action(detail=False, methods=[\"post\"], name=\"Package Add\", schema=add_action_schema)\n def add(self, request):\n \"\"\"\n Alternative way to add a package providing only its `download_url`.\n\n Multiple URLs can be submitted through a single request.\n\n Note that this feature is intended only for publicly available open\n source packages, not your private code.\n\n DejaCode will automatically collect the `filename`, `sha1`, `md5`, and\n `size` and apply them to the package definition.\n The `package_url` will also be generated when possible.\n\n If package scanning is enabled in your 
dataspace, DejaCode will also\n submit the package to ScanCode.io and the results will be returned to\n the \"Scan\" detail tab of the package when that scan is complete.\n \"\"\"\n download_urls = request.POST.getlist(\"download_url\")\n if not download_urls:\n error = {\"download_url\": \"This field is required.\"}\n return Response(error, status=400)\n\n results = defaultdict(list)\n for url in download_urls:\n url = url.strip()\n package = collect_create_scan(url, request.user)\n if package:\n results[\"added\"].append(url)\n else:\n results[\"failed\"].append(url)\n\n return Response(results)" }, { "identifier": "SubcomponentViewSet", "path": "component_catalog/api.py", "snippet": "class SubcomponentViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Subcomponent.objects.all()\n serializer_class = SubcomponentSerializer\n filterset_class = SubcomponentFilterSet\n lookup_field = \"uuid\"\n search_fields = (\"notes\",)\n ordering_fields = (\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"parent\",\n \"child\",\n )\n )" }, { "identifier": "send_scan_notification", "path": "component_catalog/views.py", "snippet": "@require_POST\n@csrf_exempt\ndef send_scan_notification(request, key):\n try:\n json_data = json.loads(request.body.decode(\"utf-8\"))\n except json.JSONDecodeError:\n raise Http404\n\n user_uuid = signing.loads(key)\n if not is_uuid4(user_uuid):\n raise Http404(\"Provided key is not a valid UUID.\")\n\n user = get_object_or_404(DejacodeUser, uuid=user_uuid)\n dataspace = user.dataspace\n\n project = json_data.get(\"project\")\n input_sources = project.get(\"input_sources\")\n if not input_sources:\n raise Http404(\"Missing `input_sources` entry in provided data.\")\n download_url = input_sources[0].get(\"download_url\")\n\n package = get_object_or_404(Package, download_url=download_url, dataspace=user.dataspace)\n description = package.download_url\n\n run = json_data.get(\"run\")\n scan_status = run.get(\"status\")\n\n update_package_from_scan = all(\n [\n dataspace.enable_package_scanning,\n dataspace.update_packages_from_scan,\n scan_status.lower() == \"success\",\n ]\n )\n\n # Triggers the Package data automatic update from Scan results, if enabled.\n if update_package_from_scan:\n scancodeio = ScanCodeIO(user)\n updated_fields = scancodeio.update_from_scan(package, user)\n if updated_fields:\n description = (\n f'Automatically updated {\", \".join(updated_fields)} from scan results\\n'\n + description\n )\n\n notify.send(\n sender=user,\n verb=f\"Scan {scan_status}\",\n action_object=package,\n recipient=user,\n description=description,\n )\n\n return JsonResponse({\"message\": \"Notification created\"})" }, { "identifier": "two_factor", "path": "dje/two_factor.py", "snippet": "TWOFA_USER_SESSION_KEY = \"_2fa_user_id\"\nclass TwoFactorEnableForm(OTPAuthenticationFormMixin, forms.Form):\nclass TwoFactorDisableForm(OTPTokenForm):\nclass TwoFactorVerifyForm(OTPTokenForm):\nclass EnableView(\n LoginRequiredMixin,\n FormView,\n):\nclass LoginView(DefaultLoginView):\nclass VerifyView(DefaultLoginView):\nclass DisableView(\n LoginRequiredMixin,\n FormView,\n):\n def __init__(self, user, key, *args, **kwargs):\n def bin_key(self):\n def clean_token(self):\n def save(self):\n def helper(self):\n def helper(self):\n def helper(self):\n def dispatch(self, request, *args, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_key(self):\n def 
get_form_kwargs(self):\n def get_qr_code(self):\n def get_context_data(self, **kwargs):\n def form_valid(self, form):\n def form_invalid(self, form):\n def form_valid(self, form):\n def get_form_kwargs(self):\n def form_valid(self, form):\n def dispatch(self, request, *args, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_form_kwargs(self):\n def form_valid(self, form):" }, { "identifier": "dejacode_site", "path": "dje/admin.py", "snippet": "EXTERNAL_SOURCE_LOOKUP = \"external_references__external_source_id\"\nADDITION = History.ADDITION\nCHANGE = History.CHANGE\nDELETION = History.DELETION\n HIDDEN_VALUE = \"*******\"\nclass DejaCodeAdminSite(AdminSite):\nclass ReferenceOnlyPermissions:\nclass DataspacedFKMixin:\nclass ProtectedFieldsMixin:\nclass ChangelistPopupPermissionMixin:\nclass ProhibitDataspaceLookupMixin:\nclass AdvancedSearchAdminMixin:\nclass HistoryAdminMixin:\nclass ColoredIconAdminMixin:\n class Media:\nclass DataspacedChangeList(ChangeList):\nclass DataspacedAdmin(\n DataspacedFKMixin,\n ProtectedFieldsMixin,\n AdvancedSearchAdminMixin,\n HistoryAdminMixin,\n admin.ModelAdmin,\n):\nclass HiddenValueWidget(forms.TextInput):\nclass DataspaceConfigurationForm(forms.ModelForm):\nclass DataspaceConfigurationInline(DataspacedFKMixin, admin.StackedInline):\nclass DataspaceAdmin(\n ReferenceOnlyPermissions,\n HistoryAdminMixin,\n admin.ModelAdmin,\n):\nclass ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline):\nclass ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline):\nclass ExternalSourceAdmin(DataspacedAdmin):\nclass DejacodeUserChangeForm(forms.ModelForm):\n class Meta:\nclass DejacodeUserCreationForm(DejacodeUserChangeForm):\nclass DejacodeUserAdmin(\n DataspacedFKMixin,\n AdvancedSearchAdminMixin,\n HistoryAdminMixin,\n UserAdmin,\n):\nclass GroupAdmin(ReferenceOnlyPermissions, HistoryAdminMixin, GroupAdmin):\n class ReferenceAccessAttemptAdmin(ReferenceOnlyPermissions, AccessAttemptAdmin):\n def get_urls(self):\ndef get_hierarchy_link(obj):\ndef get_additional_information_fieldset(pre_fields=None):\n def has_add_permission(self, request):\n def has_change_permission(self, request, obj=None):\n def has_delete_permission(self, request, obj=None):\n def has_view_permission(self, request, obj=None):\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n def get_readonly_fields(self, request, obj=None):\n def has_change_permission(self, request, obj=None):\n def lookup_allowed(self, lookup, value):\n def check(self, **kwargs):\n def get_queryset(self, request):\n def get_search_results(self, request, queryset, search_term):\n def log_addition(self, request, object, change_message=None):\n def log_change(self, request, object, message):\n def log_deletion(self, request, object, object_repr):\n def history_view(self, request, object_id, extra_context=None):\n def colored_icon(self, obj):\n def get_results(self, request):\n def has_filters_activated(self):\n def get_filters_params(self, params=None):\n def set_reference_link(self, request):\n def __init__(self, model, admin_site):\n def check(self, **kwargs):\n def changeform_view_on_site(self, obj):\n def changelist_view_on_site(self, obj):\n def urn_link(self, instance):\n def get_queryset(self, request):\n def get_changelist(self, request, **kwargs):\n def get_list_filter(self, request):\n def get_readonly_fields(self, request, obj=None):\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n def render_change_form(self, request, context, 
add=False, change=False, form_url=\"\", obj=None):\n def get_selected_ids_from_request(request, queryset):\n def base_action_with_redirect(self, request, queryset, viewname):\n def copy_to(self, request, queryset):\n def compare_with(self, request, queryset):\n def check_updates_in_reference(self, request, queryset):\n def check_newer_version_in_reference(self, request, queryset):\n def base_check_in_reference_action(request, model_class, orm_lookups):\n def get_changes_details(form):\n def save_model(self, request, obj, form, change):\n def save_formset(self, request, form, formset, change):\n def delete_model(self, request, obj):\n def delete_queryset(self, request, queryset):\n def get_urls(self):\n def get_form(self, request, obj=None, **kwargs):\n def get_fieldsets(self, request, obj=None):\n def get_inline_instances(self, request, obj=None):\n def get_actions(self, request):\n def copy_link(self, obj):\n def hide_display_links(request):\n def get_list_display(self, request):\n def get_list_display_links(self, request, list_display):\n def response_change(self, request, obj):\n def lookup_allowed(self, lookup, value):\n def _limited_permission(request, obj, has_perm):\n def has_add_permission(self, request):\n def has_change_permission(self, request, obj=None):\n def has_delete_permission(self, request, obj=None):\n def has_view_permission(self, request, obj=None):\n def has_importer(self):\n def has_activity_log(self):\n def render(self, name, value, attrs=None, renderer=None):\n def __init__(self, *args, **kwargs):\n def clean(self):\n def has_change_permission(self, request, obj=None):\n def get_readonly_fields(self, request, obj=None):\n def get_urls(self):\n def get_queryset(self, request):\n def get_actions(self, request):\n def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n def references(self, obj):\ndef send_activation_email(user, request):\n def __init__(self, *args, **kwargs):\n def save(self, commit=True):\n def get_form(self, request, obj=None, **kwargs):\n def get_queryset(self, request):\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n def user_change_password(self, request, id, form_url=\"\"):\n def get_list_filter(self, request):\n def has_activity_log(self):\n def get_urls(self):\n def log_addition(self, request, object, change_message=None):\n def delete_model(self, request, obj):\n def get_actions(self, request):\n def set_inactive(self, request, queryset):\n def export_as_csv(self, request, queryset):\n def send_activation_email(self, request, object_id):\n def get_queryset(self, request):\n def get_permissions(self, obj):\n def formfield_for_manytomany(self, db_field, request=None, **kwargs):\n def get_urls(self):\n def get_permission_group_mapping():\n def permission_details_view(self, request):\n def permission_export_csv(self, request):\ndef register_axes_admin():" }, { "identifier": "ExternalReferenceViewSet", "path": "dje/api.py", "snippet": "class ExternalReferenceViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = ExternalReference.objects.all()\n serializer_class = ExternalReferenceSerializer\n lookup_field = \"uuid\"\n filterset_class = ExternalReferenceFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\"external_id\",)\n ordering_fields = (\n \"external_source\",\n \"created_date\",\n \"last_modified_date\",\n )\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n 
.scope(self.request.user.dataspace)\n .select_related(\"content_type\")\n .prefetch_related(\"content_object\")\n )" }, { "identifier": "DejaCodeAuthenticationForm", "path": "dje/forms.py", "snippet": "class DejaCodeAuthenticationForm(AuthenticationForm):\n \"\"\"Login form.\"\"\"\n\n use_required_attribute = False\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_id = \"sign-in\"\n helper.form_action = \"login\"\n helper.form_method = \"post\"\n helper.form_tag = False\n\n fields = [\n Field(\"username\", css_class=\"input-block-level mb-3\", placeholder=_(\"Username\")),\n Field(\"password\", css_class=\"input-block-level mb-3\", placeholder=_(\"Password\")),\n Div(\n StrictSubmit(\"submit\", _(\"Sign in\"), css_class=\"btn-warning\"),\n css_class=\"d-grid\",\n ),\n ]\n\n helper.add_layout(Layout(Fieldset(\"\", *fields)))\n return helper\n\n def get_invalid_login_error(self):\n username = self.cleaned_data.get(\"username\")\n if \"@\" in username:\n return ValidationError(\n \"Be sure to enter your DejaCode username rather than your email \"\n \"address to sign in to DejaCode.\"\n )\n return super().get_invalid_login_error()" }, { "identifier": "DejaCodeActivationView", "path": "dje/registration.py", "snippet": "class DejaCodeActivationView(ActivationView):\n def get_success_url(self, user=None):\n \"\"\"Add support for 'Sign Up' registration and User creation in admin.\"\"\"\n if user.has_usable_password():\n # User created from registration process\n return self.success_url\n\n # User created in the admin addition view\n return self.get_password_reset_confirm_url(user)\n\n @staticmethod\n def get_password_reset_confirm_url(user):\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n token_generator = PasswordResetTokenGenerator()\n token = token_generator.make_token(user)\n return reverse(\"password_reset_confirm\", args=(uid, token))\n\n def get_user(self, username):\n \"\"\"\n Remove the `already_activated` exception from original method.\n\n The activation link is valid and usable until the\n `ACCOUNT_ACTIVATION_DAYS` period is expired.\n\n This is required, for a user created by an admin user, to reach\n the \"set password\" form even if the activation URL was already\n requested (by an email service for example).\n \"\"\"\n User = get_user_model()\n try:\n user = User.objects.get(\n **{\n User.USERNAME_FIELD: username,\n }\n )\n return user\n except User.DoesNotExist:\n raise ActivationError(self.BAD_USERNAME_MESSAGE, code=\"bad_username\")" }, { "identifier": "DejaCodeRegistrationForm", "path": "dje/registration.py", "snippet": "class DejaCodeRegistrationForm(RegistrationFormUniqueEmail):\n \"\"\"Used in `registration.backends.hmac.views.RegistrationView`.\"\"\"\n\n use_required_attribute = False\n hcaptcha = hCaptchaField()\n\n class Meta(RegistrationFormUniqueEmail.Meta):\n model = User\n fields = [\n \"username\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"company\",\n \"password1\",\n \"hcaptcha\",\n \"updates_email_notification\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if \"password2\" in self.fields:\n del self.fields[\"password2\"]\n\n self.fields[\"username\"].validators.append(validators.MinLengthValidator(3))\n\n placeholders = {\n \"username\": _(\"Username\"),\n \"email\": _(\"Email address\"),\n \"first_name\": _(\"First name\"),\n \"last_name\": _(\"Last name\"),\n \"company\": _(\"Company\"),\n \"password1\": _(\"Choose a password\"),\n }\n for field_name, placeholder in 
placeholders.items():\n self.fields[field_name].widget.attrs[\"placeholder\"] = placeholder\n\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"company\"].required = True\n\n self.fields[\"hcaptcha\"].label = \"\"\n\n self.fields[\n \"updates_email_notification\"\n ].label = \"Receive updates on DejaCode features and news\"\n\n for field in self.fields.values():\n field.help_text = None\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_id = \"registration\"\n helper.form_method = \"post\"\n helper.form_action = \"django_registration_register\"\n helper.attrs = {\"autocomplete\": \"off\"}\n\n eula = HTML(\n '<p class=\"eula\">By clicking on \"Create account\" below, you are agreeing '\n 'to our <a href=\"/eula/\">EULA</a>.</p>'\n )\n\n helper.layout = Layout(\n Fieldset(\n None,\n Field(\"username\", css_class=\"input-block-level\"),\n Field(\"email\", css_class=\"input-block-level\"),\n Div(\n Div(Field(\"first_name\"), css_class=\"col ps-0\"),\n Div(Field(\"last_name\"), css_class=\"col pe-0\"),\n css_class=\"row m-0\",\n ),\n Field(\"company\", css_class=\"input-block-level\"),\n Field(\n \"password1\",\n css_class=\"input-block-level\",\n autocomplete=\"new-password\",\n ),\n Div(\n Field(\"updates_email_notification\"),\n css_class=\"alert alert-primary px-2\",\n ),\n \"hcaptcha\",\n eula,\n Div(\n StrictSubmit(\n \"submit\",\n _(\"Create your account\"),\n css_class=\"btn btn-warning\",\n ),\n css_class=\"d-grid\",\n ),\n ),\n )\n\n return helper\n\n def clean_password1(self):\n password1 = self.cleaned_data.get(\"password1\")\n self.instance.username = self.cleaned_data.get(\"username\")\n password_validation.validate_password(password1, self.instance)\n return password1\n\n def save(self, commit=True):\n \"\"\"Add the default Dataspace on the user instance before saving.\"\"\"\n user = super().save(commit=False)\n\n user.dataspace, _ = Dataspace.objects.get_or_create(name=REGISTRATION_DEFAULT_DATASPACE)\n user.is_active = False\n if REGISTRATION_DEFAULT_IS_STAFF:\n user.is_staff = True\n user.save()\n\n for group_name in REGISTRATION_DEFAULT_GROUPS:\n with suppress(Group.DoesNotExist):\n user.groups.add(Group.objects.get(name=group_name))\n\n self.send_notification_email_to_admins(user)\n History.log_addition(user, user)\n return user\n\n @staticmethod\n def send_notification_email_to_admins(user):\n subject = \"[DejaCode] New User registration\"\n message = f\"New registration for user {user.username} {user.email}\"\n send_mail_to_admins_task.delay(subject, message)" }, { "identifier": "AccountProfileView", "path": "dje/views.py", "snippet": "class AccountProfileView(\n LoginRequiredMixin,\n FormView,\n):\n template_name = \"account/profile.html\"\n form_class = AccountProfileForm\n success_url = reverse_lazy(\"account_profile\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"user_has_device\": django_otp.user_has_device(self.request.user),\n }\n )\n\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.request.user\n return kwargs\n\n def form_valid(self, form):\n if getattr(form, \"changed_data\", None):\n form.save()\n messages.success(self.request, _(\"Profile updated.\"))\n return super().form_valid(form)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add the ability to regenerate the API key.\"\"\"\n if request.POST.get(\"regenerate-api-key\"):\n 
request.user.regenerate_api_key()\n messages.success(request, _(\"Your API key was regenerated.\"))\n return self.form_valid(None)\n\n return super().post(request, *args, **kwargs)" }, { "identifier": "AllNotificationsList", "path": "dje/views.py", "snippet": "class AllNotificationsList(\n NotificationsCountMixin,\n notifications_views.AllNotificationsList,\n):\n pass" }, { "identifier": "DataspaceAwareAutocompleteLookup", "path": "dje/views.py", "snippet": "class DataspaceAwareAutocompleteLookup(AutocompleteLookup):\n \"\"\"\n Extend Grappelli's ``AutocompleteLookup`` view class so that the data\n it Return is scoped to the current user's dataspace.\n\n The correct behavior is for the autocomplete results to be scoped to the\n dataspace of the object that the user is editing.\n Otherwise if the user is a member of the reference dataspace, the autocomplete\n results will contain objects from other dataspaces.\n In case the user is in the reference dataspace, we use the HTTP_REFERER from\n the request to determine the edited object and scope the result to its\n dataspace.\n The proper way to do this should be to patch Grappelli's JavaScript source\n code to pass parameters about the edited object in the ajax request.\n\n https://github.com/sehmaschine/django-grappelli/issues/362\n\n The security scoping is applied when the related manager is flagged as `is_secured`.\n \"\"\"\n\n def set_dataspace_scope(self, qs):\n \"\"\"\n Limit the queryset scope to the user dataspace.\n If the user is a reference dataspace user, he may been editing an object\n from another dataspace, in that case we are trying to limit the\n results to this dataspace.\n \"\"\"\n user_dataspace = self.request.user.dataspace\n\n # If the user is a reference dataspace user, we look into the the `HTTP_REFERER`\n # to determine if he's looking at another dataspace object.\n if user_dataspace.is_reference:\n instance = get_instance_from_referer(self.request)\n if instance:\n return qs.scope(instance.dataspace)\n\n return qs.scope(user_dataspace)\n\n def get_annotated_queryset(self, qs):\n \"\"\"\n Add some annotations to assist the search fields defined in\n GRAPPELLI_AUTOCOMPLETE_SEARCH_FIELDS.\n \"\"\"\n if self.model._meta.model_name in [\"product\", \"component\"]:\n qs = qs.annotate(name_version=Concat(\"name\", Value(\" \"), \"version\"))\n\n if self.model._meta.model_name == \"package\":\n qs = qs.annotate(\n type_name_version=Concat(\"type\", Value(\" \"), \"name\", Value(\" \"), \"version\"),\n )\n\n return qs\n\n def get_searched_queryset(self, qs):\n \"\"\"\n Add support for search `Package` directly from a Package URL input term\n such as: 'pkg:type/name@version'.\n \"\"\"\n term = self.GET.get(\"term\")\n if self.model._meta.model_name == \"package\" and term.startswith(\"pkg:\"):\n return qs.for_package_url(term)\n\n return super().get_searched_queryset(qs)\n\n def get_queryset(self):\n if is_secured(self.model._default_manager):\n perm = get_permission_codename(\"change\", self.model._meta)\n qs = self.model._default_manager.get_queryset(self.request.user, perm)\n else:\n qs = self.set_dataspace_scope(self.model._default_manager.all())\n\n qs = self.get_annotated_queryset(qs)\n qs = self.get_filtered_queryset(qs)\n qs = self.get_searched_queryset(qs)\n return qs.distinct()" }, { "identifier": "DataspaceAwareRelatedLookup", "path": "dje/views.py", "snippet": "class DataspaceAwareRelatedLookup(RelatedLookup):\n \"\"\"\n Rxtend Grappelli's ``RelatedLookup`` view class so that the data it\n Return is scoped to the 
current user's dataspace.\n\n The security scoping is applied when the related manager is flagged as `is_secured`.\n \"\"\"\n\n def get_queryset(self):\n if is_secured(self.model._default_manager):\n perm = get_permission_codename(\"change\", self.model._meta)\n qs = self.model._default_manager.get_queryset(self.request.user, perm)\n qs = self.get_filtered_queryset(qs)\n else:\n qs = super().get_queryset()\n\n user_dataspace = self.request.user.dataspace\n if not user_dataspace.is_reference:\n qs = qs.scope(user_dataspace)\n return qs" }, { "identifier": "GlobalSearchListView", "path": "dje/views.py", "snippet": "class GlobalSearchListView(AcceptAnonymousMixin, TemplateView):\n template_name = \"global_search.html\"\n SearchResult = namedtuple(\"SearchResult\", [\"object_list\", \"paginator_count\"])\n\n def get_list_view_results(self, view_class, dataspace):\n request = RequestFactory().get(\"\", self.request.GET)\n # Fake User.dataspace using deepcopy() to avoid any side-effects on the UI.\n request.user = copy.deepcopy(self.request.user)\n request.user.dataspace = dataspace\n request.session = {}\n response = view_class.as_view()(request)\n return self.SearchResult(\n object_list=response.context_data[\"object_list\"],\n paginator_count=response.context_data[\"paginator\"].count,\n )\n\n def get_context_data(self, **kwargs):\n # Avoid circular references\n from component_catalog.views import ComponentListView\n from component_catalog.views import PackageListView\n from license_library.views import LicenseListView\n from organization.views import OwnerListView\n from product_portfolio.views import ProductListView\n\n get_result = self.get_list_view_results\n context = super().get_context_data(**kwargs)\n search_query = self.request.GET.get(\"q\", \"\")\n if not search_query:\n return context\n\n user = self.request.user\n user_dataspace = user.dataspace\n reference_dataspace = Dataspace.objects.get_reference()\n\n context.update(\n {\n \"search_query\": search_query,\n \"component_results\": get_result(ComponentListView, user_dataspace),\n \"package_results\": get_result(PackageListView, user_dataspace),\n \"license_results\": get_result(LicenseListView, user_dataspace),\n \"owner_results\": get_result(OwnerListView, user_dataspace),\n }\n )\n\n include_products = all(\n [\n user.is_authenticated,\n user.has_perm(\"product_portfolio.view_product\"),\n ]\n )\n\n if include_products:\n context.update(\n {\n \"include_products\": True,\n \"product_results\": get_result(ProductListView, user_dataspace),\n }\n )\n\n insert_reference_data = all(\n [\n self.request.user.is_authenticated,\n reference_dataspace,\n user_dataspace != reference_dataspace,\n ]\n )\n\n if insert_reference_data:\n context.update(\n {\n \"reference_component_results\": get_result(\n ComponentListView, reference_dataspace\n ),\n \"reference_license_results\": get_result(LicenseListView, reference_dataspace),\n \"reference_package_results\": get_result(PackageListView, reference_dataspace),\n \"reference_owner_results\": get_result(OwnerListView, reference_dataspace),\n \"reference_dataspace\": reference_dataspace,\n }\n )\n\n context[\"include_purldb\"] = all(\n [user_dataspace.enable_purldb_access, PurlDB(user).is_available()]\n )\n\n return context" }, { "identifier": "IntegrationsStatusView", "path": "dje/views.py", "snippet": "class IntegrationsStatusView(\n LoginRequiredMixin,\n IsStaffMixin,\n TemplateView,\n):\n template_name = \"integrations_status.html\"\n # Make sure additional integration have a `module.label` 
set\n # along the `is_configured` and `is_available` functions.\n integrations = [\n ScanCodeIO,\n PurlDB,\n VulnerableCode,\n ]\n\n def get_integration_status(self, integration_class):\n \"\"\"\n Return the current status of the provided `integration_module`.\n Only check the availability if the integration is configured.\n \"\"\"\n is_configured = False\n is_available = False\n error_log = \"\"\n\n integration = integration_class(user=self.request.user)\n\n if integration.is_configured():\n is_configured = True\n try:\n is_available = integration.is_available(raise_exceptions=True)\n except Exception as exception:\n error_log = str(exception)\n\n status = {\n \"is_configured\": is_configured,\n \"is_available\": is_available,\n \"error_log\": error_log,\n }\n\n if self.request.user.is_superuser:\n status[\"service_url\"] = integration.service_url\n\n return status\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n integrations_status = {\n integration_class.label: self.get_integration_status(integration_class)\n for integration_class in self.integrations\n }\n\n context.update(\n {\n \"integrations_status\": integrations_status,\n }\n )\n return context" }, { "identifier": "UnreadNotificationsList", "path": "dje/views.py", "snippet": "class UnreadNotificationsList(\n NotificationsCountMixin,\n notifications_views.UnreadNotificationsList,\n):\n pass" }, { "identifier": "home_view", "path": "dje/views.py", "snippet": "@login_required\ndef home_view(request):\n \"\"\"Dataspace homepage.\"\"\"\n documentation_urls = {}\n rtd_url = \"https://dejacode.readthedocs.io/en/latest\"\n tutorials_label = format_html('Tutorials <span class=\"badge text-bg-success\">New</span>')\n\n documentation_urls = {\n tutorials_label: f\"{rtd_url}/tutorial-1.html\",\n \"How-To videos\": \"https://www.youtube.com/playlist?list=PLCq_LXeUqhkQj0u7M26fSHt1ebFhNCpCv\",\n \"API documentation\": reverse(\"api-docs:docs-index\"),\n }\n\n support_urls = {\n \"Report an issue\": \"https://github.com/nexB/dejacode/issues/new/\",\n }\n\n sections = {\n \"Documentation\": documentation_urls,\n \"Support\": support_urls,\n }\n\n user = request.user\n if user.is_staff:\n documentation_urls[\"Models documentation\"] = reverse(\"admin:docs_models\")\n\n request_qs = Request.objects.for_list_view(user).open().order_by(\"-last_modified_date\")\n\n cards = []\n homepage_layout = user.get_homepage_layout()\n if homepage_layout:\n cards = homepage_layout.cards_with_objects(user)\n\n context = {\n \"sections\": sections,\n \"request_assigned_to_me\": request_qs.assigned_to(user),\n \"request_followed_by_me\": request_qs.followed_by(user),\n \"cards\": cards,\n }\n return render(request, \"dataspace_home.html\", context)" }, { "identifier": "index_dispatch", "path": "dje/views.py", "snippet": "@accept_anonymous\ndef index_dispatch(request):\n \"\"\"Redirect to the LOGIN_REDIRECT_URL.\"\"\"\n return redirect(settings.LOGIN_REDIRECT_URL)" }, { "identifier": "urn_resolve_view", "path": "dje/views.py", "snippet": "@accept_anonymous\ndef urn_resolve_view(request, urn=None):\n \"\"\"\n Given a URN, this view Return the details page of the Object.\n The URN needs to be well formatted and to target an existing Object.\n If not, an error page is returned.\n See the URN module for details on supported models.\n \"\"\"\n # Supports value from the URL or submitted by the form in the urn_resolve.html template\n urn = urn or request.GET.get(\"urn\")\n if not urn:\n return render(request, 
\"urn_resolve.html\")\n\n try:\n # The resolve method will return the corresponding Object\n instance = urn_resolve(urn, request.user.dataspace)\n # Redirecting the user to the details page of the Object\n return redirect(instance.get_absolute_url())\n except URNValidationError as e:\n error_message = e\n except ObjectDoesNotExist:\n # URN format and model is correct, but the Object request do no exists\n error_message = \"The requested Object does not exist.\"\n except AttributeError:\n # The get_absolute_url() method is not implemented for this Model,\n # We do not have a details view for this Model.\n error_message = \"Unsupported URN model.\"\n\n return render(\n request,\n \"urn_resolve.html\",\n {\n \"error\": error_message,\n \"urn\": urn,\n },\n )" }, { "identifier": "LicenseAnnotationViewSet", "path": "license_library/api.py", "snippet": "class LicenseAnnotationViewSet(mixins.DestroyModelMixin, CreateRetrieveUpdateListViewSet):\n queryset = LicenseAnnotation.objects.all()\n serializer_class = LicenseAnnotationSerializer\n pagination_class = LicenseAnnotationPagination\n filterset_class = LicenseAnnotationFilterSet\n lookup_field = \"id\"\n renderer_classes = [renderers.JSONRenderer]\n permission_classes = [\n IsAuthenticatedOrAnonymous,\n permissions.DjangoModelPermissionsOrAnonReadOnly,\n ]\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"license\",\n \"assigned_tag\",\n \"assigned_tag__license_tag\",\n )\n .order_by(\"id\")\n )\n\n @staticmethod\n def log_action(request, obj, message):\n History.log_change(request.user, obj, message)\n\n @staticmethod\n def construct_change_message(annotation, action):\n \"\"\"\n Create a message suitable for the LogEntry model.\n Similar to messages from ModelAdmin.construct_change_message()\n \"\"\"\n if annotation.assigned_tag:\n tag_message = f'for tag: \"{annotation.assigned_tag}\"'\n else:\n tag_message = \"without tag\"\n\n return \"{action} a {name} {tag_message}.\".format(\n action=action,\n name=str(annotation._meta.verbose_name),\n tag_message=str(tag_message),\n )\n\n def perform_create(self, serializer):\n # WARNING: bypassing the direct super() on purpose\n super(CreateRetrieveUpdateListViewSet, self).perform_create(serializer)\n message = self.construct_change_message(serializer.instance, \"Added\")\n self.log_action(self.request, serializer.instance.license, message)\n\n def perform_update(self, serializer):\n # WARNING: bypassing the direct super() on purpose\n super(CreateRetrieveUpdateListViewSet, self).perform_create(serializer)\n message = self.construct_change_message(serializer.instance, \"Changed\")\n self.log_action(self.request, serializer.instance.license, message)\n\n def perform_destroy(self, instance):\n super().perform_destroy(instance)\n message = self.construct_change_message(instance, \"Deleted\")\n self.log_action(self.request, instance.license, message)" }, { "identifier": "LicenseViewSet", "path": "license_library/api.py", "snippet": "class LicenseViewSet(CreateRetrieveUpdateListViewSet):\n queryset = License.objects.all()\n serializer_class = LicenseSerializer\n filterset_class = LicenseFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"key\",\n \"name\",\n \"short_name\",\n \"spdx_license_key\",\n )\n ordering_fields = (\n \"key\",\n \"name\",\n \"short_name\",\n \"publication_year\",\n \"category\",\n \"license_style\",\n \"license_profile\",\n \"usage_policy\",\n \"curation_level\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on 
= LicenseAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"license_profile\",\n \"license_style\",\n \"category\",\n \"owner__dataspace\",\n \"license_status\",\n \"usage_policy\",\n )\n .prefetch_related(\n \"licenseassignedtag_set__license_tag\",\n external_references_prefetch,\n )\n )" }, { "identifier": "OwnerViewSet", "path": "organization/api.py", "snippet": "class OwnerViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Owner.objects.all()\n serializer_class = OwnerSerializer\n lookup_field = \"uuid\"\n filterset_class = OwnerFilterSet\n search_fields = (\n \"name\",\n \"alias\",\n \"notes\",\n )\n search_fields_autocomplete = (\"name\",)\n ordering_fields = (\n \"name\",\n \"alias\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = OwnerAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .prefetch_related(\n \"license_set\",\n \"component_set\",\n external_references_prefetch,\n )\n )" }, { "identifier": "UsagePolicyViewSet", "path": "policy/api.py", "snippet": "class UsagePolicyViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = UsagePolicy.objects.all()\n serializer_class = UsagePolicySerializer\n lookup_field = \"uuid\"\n extra_permissions = (TabPermission,)\n search_fields = (\n \"label\",\n \"guidelines\",\n )\n ordering_fields = (\"label\",)\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"content_type\",\n \"associated_product_relation_status\",\n )\n )" }, { "identifier": "CodebaseResourceViewSet", "path": "product_portfolio/api.py", "snippet": "class CodebaseResourceViewSet(ProductRelatedViewSet):\n queryset = CodebaseResource.objects.none()\n serializer_class = CodebaseResourceSerializer\n filterset_class = CodebaseResourceFilterSet\n search_fields = (\"path\",)\n ordering_fields = (\n \"path\",\n \"is_deployment_path\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"product_component__component\",\n \"product_package__package\",\n )\n .prefetch_related(\n \"related_deployed_from__deployed_from\",\n # This one is different from the `default_select_prefetch` as its using the m2m\n \"deployed_to__deployed_to\",\n \"product\",\n )\n )" }, { "identifier": "ProductComponentViewSet", "path": "product_portfolio/api.py", "snippet": "class ProductComponentViewSet(ProductRelationViewSet):\n relation_fk_field = \"component\"\n queryset = ProductComponent.objects.none()\n serializer_class = ProductComponentSerializer\n filterset_class = ProductComponentFilterSet\n search_fields = (\"notes\",)\n ordering_fields = (\n \"component\",\n \"review_status\",\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )" }, { "identifier": "ProductPackageViewSet", "path": "product_portfolio/api.py", "snippet": "class ProductPackageViewSet(ProductRelationViewSet):\n relation_fk_field = \"package\"\n queryset = ProductPackage.objects.none()\n serializer_class = ProductPackageSerializer\n filterset_class = ProductPackageFilterSet\n search_fields = (\"notes\",)\n ordering_fields = (\n \"package\",\n \"review_status\",\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )" }, { "identifier": "ProductViewSet", "path": "product_portfolio/api.py", "snippet": "class 
ProductViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Product.objects.none()\n serializer_class = ProductSerializer\n filterset_class = ProductFilterSet\n lookup_field = \"uuid\"\n # `IsAuthenticated` and `DjangoModelPermissions` are the default values\n # set in the `DEFAULT_PERMISSION_CLASSES` settings.\n # See http://www.django-rest-framework.org/api-guide/permissions/#djangoobjectpermissions\n extra_permissions = (permissions.DjangoObjectPermissions,)\n search_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"homepage_url\",\n )\n search_fields_autocomplete = (\n \"name\",\n \"version\",\n )\n ordering_fields = (\n \"name\",\n \"version\",\n \"configuration_status\",\n \"license_expression\",\n \"release_date\",\n \"copyright\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n Product.objects.get_queryset(self.request.user)\n .select_related(\n \"owner\",\n \"configuration_status\",\n )\n .prefetch_related(\n \"components\",\n \"packages\",\n \"licenses\",\n )\n )\n\n def perform_create(self, serializer):\n \"\"\"Add view/change/delete Object permissions to the Product creator.\"\"\"\n super().perform_create(serializer)\n assign_all_object_permissions(self.request.user, serializer.instance)" }, { "identifier": "ReportViewSet", "path": "reporting/api.py", "snippet": "class ReportViewSet(ExtraPermissionsViewSetMixin, viewsets.ReadOnlyModelViewSet):\n queryset = Report.objects.user_availables()\n serializer_class = ReportSerializer\n lookup_field = \"uuid\"\n filterset_class = ReportFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\"name\",)\n ordering_fields = (\"name\",)\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .scope(self.request.user.dataspace)\n .select_related(\n \"query__content_type\",\n \"column_template\",\n )\n )" }, { "identifier": "RequestTemplateViewSet", "path": "workflow/api.py", "snippet": "class RequestTemplateViewSet(ExtraPermissionsViewSetMixin, ReadOnlyModelViewSet):\n queryset = RequestTemplate.objects.all()\n serializer_class = RequestTemplateSerializer\n lookup_field = \"uuid\"\n extra_permissions = (TabPermission,)\n filterset_class = RequestTemplateFilterSet\n search_fields = (\n \"name\",\n \"description\",\n )\n ordering_fields = (\n \"name\",\n \"content_type\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .scope(self.request.user.dataspace)\n .select_related(\n \"default_assignee\",\n \"content_type\",\n )\n .prefetch_related(\n \"questions\",\n )\n )" }, { "identifier": "RequestViewSet", "path": "workflow/api.py", "snippet": "class RequestViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = Request.objects.all()\n serializer_class = RequestSerializer\n lookup_field = \"uuid\"\n filterset_class = RequestFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\n \"title\",\n \"serialized_data\",\n )\n search_fields_autocomplete = (\"title\",)\n ordering_fields = (\n \"title\",\n \"request_template\",\n \"status\",\n \"priority\",\n \"assignee\",\n \"requester\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n user = self.request.user\n qs = (\n super()\n .get_queryset()\n .product_secured(user)\n .select_related(\n \"request_template\",\n \"requester\",\n \"assignee\",\n \"priority\",\n \"product_context\",\n \"content_type\",\n )\n .prefetch_related( # one extra query per content_type\n \"content_object\",\n )\n )\n if not user.is_staff:\n qs = 
qs.filter(is_private=False)\n return qs\n\n def perform_create(self, serializer):\n super().perform_create(serializer)\n send_request_notification(serializer.instance, created=True)\n\n def perform_update(self, serializer):\n super().perform_update(serializer)\n send_request_notification(serializer.instance, created=False)\n serializer.instance.events.create(\n user=self.request.user,\n text=\"Request edited.\",\n event_type=RequestEvent.EDIT,\n dataspace=self.request.user.dataspace,\n )" } ]
from django.conf import settings from django.conf.urls import include from django.contrib import admin from django.contrib.auth import views as auth_views from django.contrib.auth.decorators import login_required from django.template.loader import render_to_string from django.urls import path from django.views.defaults import page_not_found from django.views.generic import RedirectView from django.views.generic import TemplateView from notifications.views import mark_all_as_read from rest_framework.documentation import include_docs_urls from rest_framework.routers import DefaultRouter from component_catalog.api import ComponentViewSet from component_catalog.api import PackageViewSet from component_catalog.api import SubcomponentViewSet from component_catalog.views import send_scan_notification from dje import two_factor from dje.admin import dejacode_site from dje.api import ExternalReferenceViewSet from dje.forms import DejaCodeAuthenticationForm from dje.registration import DejaCodeActivationView from dje.registration import DejaCodeRegistrationForm from dje.views import AccountProfileView from dje.views import AllNotificationsList from dje.views import DataspaceAwareAutocompleteLookup from dje.views import DataspaceAwareRelatedLookup from dje.views import GlobalSearchListView from dje.views import IntegrationsStatusView from dje.views import UnreadNotificationsList from dje.views import home_view from dje.views import index_dispatch from dje.views import urn_resolve_view from license_library.api import LicenseAnnotationViewSet from license_library.api import LicenseViewSet from organization.api import OwnerViewSet from policy.api import UsagePolicyViewSet from product_portfolio.api import CodebaseResourceViewSet from product_portfolio.api import ProductComponentViewSet from product_portfolio.api import ProductPackageViewSet from product_portfolio.api import ProductViewSet from reporting.api import ReportViewSet from workflow.api import RequestTemplateViewSet from workflow.api import RequestViewSet from django_registration.backends.activation.views import RegistrationView
12,474
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet) api_router.register("reports", ReportViewSet) api_router.register("external_references", ExternalReferenceViewSet) api_router.register("usage_policies", UsagePolicyViewSet) urlpatterns = [ path("", index_dispatch, name="index_dispatch"), path("home/", home_view, name="home"), path("integrations_status/", IntegrationsStatusView.as_view(), name="integrations_status"), path("account/", include("django.contrib.auth.urls")), path("account/profile/", AccountProfileView.as_view(), name="account_profile"), path("logout/", auth_views.LogoutView.as_view(next_page="login"), name="logout"), path( "login/", two_factor.LoginView.as_view( authentication_form=DejaCodeAuthenticationForm, redirect_authenticated_user=True, ), name="login", ), # Activation and password views are required for the user creation flow. # registration_activation_complete needs to be register before registration_activate # so the 'complete/' segment is not caught as the activation_key path( "account/activate/complete/", TemplateView.as_view(template_name="django_registration/activation_complete.html"), name="django_registration_activation_complete", ), path( "account/activate/<str:activation_key>/", DejaCodeActivationView.as_view(), name="django_registration_activate", ), # Two-factor authentication path("account/2fa/enable/", two_factor.EnableView.as_view(), name="account_2fa_enable"), path("account/2fa/disable/", two_factor.DisableView.as_view(), name="account_2fa_disable"), path("account/2fa/verify/", two_factor.VerifyView.as_view(), name="account_2fa_verify"), path("urn/", urn_resolve_view, name="urn_resolve"), path("urn/<urn>/", urn_resolve_view, name="urn_resolve"), path("admin/", dejacode_site.urls), # Grappelli does not have a hook for replacing the ``RelatedLookup`` view # class so we hijack the url used by that view and use our own version of # ``RelatedLookup``. The same is done for ``AutocompleteLookup``. path( "grappelli/lookup/related/", DataspaceAwareRelatedLookup.as_view(), name="grp_related_lookup", ), path( "grappelli/lookup/autocomplete/", DataspaceAwareAutocompleteLookup.as_view(), name="grp_autocomplete_lookup", ), # Disable Grappelli's M2M lookup. path("grappelli/lookup/m2m/", page_not_found, name="grp_m2m_lookup"), # This need to be registered after the overrides. 
path("grappelli/", include("grappelli.urls")), path("favicon.ico", RedirectView.as_view(url="/static/img/favicon.ico", permanent=True)), ] urlpatterns += [ path("licenses/", include(("license_library.urls", "license_library"))), path("", include(("component_catalog.urls", "component_catalog"))), path("products/", include(("product_portfolio.urls", "product_portfolio"))), path("owners/", include(("organization.urls", "organization"))), path("requests/", include(("workflow.urls", "workflow"))), path("reports/", include(("reporting.urls", "reporting"))), path("global_search/", GlobalSearchListView.as_view(), name="global_search"), ] notification_patterns = [
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet) api_router.register("reports", ReportViewSet) api_router.register("external_references", ExternalReferenceViewSet) api_router.register("usage_policies", UsagePolicyViewSet) urlpatterns = [ path("", index_dispatch, name="index_dispatch"), path("home/", home_view, name="home"), path("integrations_status/", IntegrationsStatusView.as_view(), name="integrations_status"), path("account/", include("django.contrib.auth.urls")), path("account/profile/", AccountProfileView.as_view(), name="account_profile"), path("logout/", auth_views.LogoutView.as_view(next_page="login"), name="logout"), path( "login/", two_factor.LoginView.as_view( authentication_form=DejaCodeAuthenticationForm, redirect_authenticated_user=True, ), name="login", ), # Activation and password views are required for the user creation flow. # registration_activation_complete needs to be register before registration_activate # so the 'complete/' segment is not caught as the activation_key path( "account/activate/complete/", TemplateView.as_view(template_name="django_registration/activation_complete.html"), name="django_registration_activation_complete", ), path( "account/activate/<str:activation_key>/", DejaCodeActivationView.as_view(), name="django_registration_activate", ), # Two-factor authentication path("account/2fa/enable/", two_factor.EnableView.as_view(), name="account_2fa_enable"), path("account/2fa/disable/", two_factor.DisableView.as_view(), name="account_2fa_disable"), path("account/2fa/verify/", two_factor.VerifyView.as_view(), name="account_2fa_verify"), path("urn/", urn_resolve_view, name="urn_resolve"), path("urn/<urn>/", urn_resolve_view, name="urn_resolve"), path("admin/", dejacode_site.urls), # Grappelli does not have a hook for replacing the ``RelatedLookup`` view # class so we hijack the url used by that view and use our own version of # ``RelatedLookup``. The same is done for ``AutocompleteLookup``. path( "grappelli/lookup/related/", DataspaceAwareRelatedLookup.as_view(), name="grp_related_lookup", ), path( "grappelli/lookup/autocomplete/", DataspaceAwareAutocompleteLookup.as_view(), name="grp_autocomplete_lookup", ), # Disable Grappelli's M2M lookup. path("grappelli/lookup/m2m/", page_not_found, name="grp_m2m_lookup"), # This need to be registered after the overrides. 
path("grappelli/", include("grappelli.urls")), path("favicon.ico", RedirectView.as_view(url="/static/img/favicon.ico", permanent=True)), ] urlpatterns += [ path("licenses/", include(("license_library.urls", "license_library"))), path("", include(("component_catalog.urls", "component_catalog"))), path("products/", include(("product_portfolio.urls", "product_portfolio"))), path("owners/", include(("organization.urls", "organization"))), path("requests/", include(("workflow.urls", "workflow"))), path("reports/", include(("reporting.urls", "reporting"))), path("global_search/", GlobalSearchListView.as_view(), name="global_search"), ] notification_patterns = [
path("", UnreadNotificationsList.as_view(), name="unread"),
16
2023-12-07 16:57:42+00:00
16k
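Note on the record above: its cropped code ends by opening the notification_patterns list, and its next_line value is the first entry of that list. For orientation only, here is a minimal hedged sketch of how that list plausibly continues and gets mounted, reusing the notification views the record itself imports (UnreadNotificationsList, AllNotificationsList, mark_all_as_read); everything past the first path() entry, including the "notifications/" prefix and namespace, is an illustrative assumption rather than data taken from the record.

from django.urls import include, path
from notifications.views import mark_all_as_read
from dje.views import AllNotificationsList
from dje.views import UnreadNotificationsList

notification_patterns = [
    path("", UnreadNotificationsList.as_view(), name="unread"),  # confirmed by the record's next_line
    path("all/", AllNotificationsList.as_view(), name="all"),  # assumption
    path("mark-all-as-read/", mark_all_as_read, name="mark_all_as_read"),  # assumption
]

# In the record's urls.py these routes would be appended to the existing
# ``urlpatterns``; include() takes a (pattern_list, app_namespace) tuple, so
# the routes reverse as "notifications:unread", "notifications:all", etc.
urlpatterns = [
    path("notifications/", include((notification_patterns, "notifications"))),
]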
wusize/CLIM
src/open_clip/factory.py
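The next record targets src/open_clip/factory.py in wusize/CLIM; the retrieved context below consists of the model classes (CLIP, CustomTextCLIP, CoCa), the weight/precision helpers (convert_weights_to_lp, convert_to_custom_text_state_dict, resize_pos_embed, get_cast_dtype) and the losses (ClipLoss, DistillClipLoss) that such a factory module wires together. For orientation only, a minimal usage sketch assuming the upstream open_clip_torch factory API (create_model_and_transforms, get_tokenizer); the CLIM fork's own factory may differ in signatures and supported arguments.

import torch
import open_clip  # assumption: the upstream open_clip_torch package is installed

# Build the model, its preprocessing transforms, and the matching tokenizer
# from a named architecture config plus a pretrained weight tag.
model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-B-32", pretrained="laion2b_s34b_b79k"
)
tokenizer = open_clip.get_tokenizer("ViT-B-32")

model.eval()
with torch.no_grad():
    # Dummy tensor standing in for a preprocessed batch of 224x224 RGB images.
    images = torch.randn(2, 3, 224, 224)
    texts = tokenizer(["a diagram", "a dog"])
    # normalize=True matches the encode_image/encode_text signatures shown in
    # the CLIP snippet below and yields unit-norm embeddings.
    image_features = model.encode_image(images, normalize=True)
    text_features = model.encode_text(texts, normalize=True)
    # Temperature-scaled cosine similarities, as in ClipLoss.get_logits below.
    logits_per_image = model.logit_scale.exp() * image_features @ text_features.T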
[ { "identifier": "OPENAI_DATASET_MEAN", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)" }, { "identifier": "OPENAI_DATASET_STD", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)" }, { "identifier": "CLIP", "path": "src/open_clip/model.py", "snippet": "class CLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n freeze_text=True,\n ):\n assert freeze_text, 'For now we must freeze text'\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n\n text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n if freeze_text:\n print(f'Freeze text encoder parameters', flush=True)\n for param in text.parameters():\n param.requires_grad = False\n text.eval()\n self.transformer = text.transformer\n self.vocab_size = text.vocab_size\n self.embed_dim = embed_dim\n self.token_embedding = text.token_embedding\n self.positional_embedding = text.positional_embedding\n self.ln_final = text.ln_final\n self.text_projection = text.text_projection\n self.register_buffer('attn_mask', text.attn_mask, persistent=False)\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs):\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.transformer.grad_checkpointing = enable\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_dense(self, image, normalize: bool = False, keep_shape=False):\n features = self.visual.encode_dense(image, keep_shape=keep_shape)\n if normalize:\n if keep_shape:\n features = F.normalize(features, dim=1)\n else:\n features = F.normalize(features, dim=-1)\n return features\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False,\n extract_type='v1'):\n features = self.visual.extract_roi_features(image, normed_boxes,\n extract_type=extract_type)\n if normalize:\n features = F.normalize(features, dim=-1)\n return features\n\n def _pool_masks(self, image, masks, normalize, mask_attn=False):\n if mask_attn:\n mask_pooled = self.visual.mask_attn_pool(image, masks)\n else:\n mask_pooled = self.visual.mask_pool(image, masks)\n if normalize:\n mask_pooled = F.normalize(mask_pooled, dim=-1)\n return mask_pooled\n\n def _pool_masks_v3(self, image, masks, normalize):\n mask_pooled_v1, x_dense = self.visual.mask_attn_pool(image, masks, return_dense=True)\n x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n x_dense = torch.repeat_interleave(\n x_dense, torch.tensor([len(m) for m in masks], device=x_dense.device), dim=0)\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n if normalize:\n mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1)\n mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1)\n return mask_pooled_v1, mask_pooled_v2\n\n def encode_masks(self, image, masks, normalize=True, 
mask_attn=False):\n return self._pool_masks(image, masks, normalize, mask_attn)\n\n def encode_text(self, text, normalize: bool = False):\n cast_dtype = self.transformer.get_cast_dtype()\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=self.attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return F.normalize(x, dim=-1) if normalize else x\n\n def forward(self, image, text=None):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()\n\n def train(self, mode: bool = True):\n if not isinstance(mode, bool):\n raise ValueError(\"training mode is expected to be boolean\")\n self.training = mode\n for name, module in self.named_children():\n if name == 'visual':\n if mode:\n logging.info(f'========Set module {name} as train mode========')\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode)\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode=False)\n return self" }, { "identifier": "CustomTextCLIP", "path": "src/open_clip/model.py", "snippet": "class CustomTextCLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n ):\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n self.text.lock(unlocked_layers, freeze_layer_norm)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False):\n features = self.visual.extract_roi_features(image, normed_boxes)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_text(self, text, normalize: bool = False):\n features = self.text(text)\n return F.normalize(features, dim=-1) if normalize else features\n\n def forward(self, image, text):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = 
self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()" }, { "identifier": "convert_weights_to_lp", "path": "src/open_clip/model.py", "snippet": "def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):\n \"\"\"Convert applicable model parameters to low-precision (bf16 or fp16)\"\"\"\n\n def _convert_weights(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.to(dtype)\n if l.bias is not None:\n l.bias.data = l.bias.data.to(dtype)\n\n if isinstance(l, (nn.MultiheadAttention, Attention)):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.to(dtype)\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n model.apply(_convert_weights)" }, { "identifier": "convert_to_custom_text_state_dict", "path": "src/open_clip/model.py", "snippet": "def convert_to_custom_text_state_dict(state_dict: dict):\n if 'text_projection' in state_dict:\n # old format state_dict, move text tower -> .text\n new_state_dict = {}\n for k, v in state_dict.items():\n if any(k.startswith(p) for p in (\n 'text_projection',\n 'positional_embedding',\n 'token_embedding',\n 'transformer',\n 'ln_final',\n )):\n k = 'text.' + k\n new_state_dict[k] = v\n return new_state_dict\n return state_dict" }, { "identifier": "resize_pos_embed", "path": "src/open_clip/model.py", "snippet": "def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True):\n # Rescale the grid of position embeddings when loading from state_dict\n old_pos_embed = state_dict.get('visual.positional_embedding', None)\n if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):\n return\n grid_size = to_2tuple(model.visual.grid_size)\n extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)\n new_seq_len = grid_size[0] * grid_size[1] + extra_tokens\n if new_seq_len == old_pos_embed.shape[0]:\n return\n\n if extra_tokens:\n pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]\n else:\n pos_emb_tok, pos_emb_img = None, old_pos_embed\n old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))\n\n logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)\n pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)\n pos_emb_img = F.interpolate(\n pos_emb_img,\n size=grid_size,\n mode=interpolation,\n antialias=antialias,\n align_corners=False,\n )\n pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]\n if pos_emb_tok is not None:\n new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)\n else:\n new_pos_embed = pos_emb_img\n state_dict['visual.positional_embedding'] = new_pos_embed" }, { "identifier": "get_cast_dtype", "path": "src/open_clip/model.py", "snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype" }, { "identifier": "CoCa", "path": "src/open_clip/coca_model.py", "snippet": "class CoCa(nn.Module):\n 
def __init__(\n self,\n embed_dim,\n multimodal_cfg: MultimodalCfg,\n text_cfg: CLIPTextCfg,\n vision_cfg: CLIPVisionCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n pad_id: int = 0,\n ):\n super().__init__()\n multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg\n text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg\n vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg\n\n self.text = _build_text_tower(\n embed_dim=embed_dim,\n text_cfg=text_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n vocab_size = (\n text_cfg.vocab_size # for hf models\n if hasattr(text_cfg, \"hf_model_name\") and text_cfg.hf_model_name is not None\n else text_cfg.vocab_size\n )\n\n self.visual = _build_vision_tower(\n embed_dim=embed_dim,\n vision_cfg=vision_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.text_decoder = _build_text_decoder_tower(\n vocab_size,\n multimodal_cfg=multimodal_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.pad_id = pad_id\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n self.text_decoder.set_grad_checkpointing(enable)\n\n def _encode_image(self, images, normalize=True):\n image_latent, tokens_embs = self.visual(images)\n image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent\n return image_latent, tokens_embs\n\n def _encode_text(self, text, normalize=True, embed_cls=True):\n text = text[:, :-1] if embed_cls else text # make space for CLS token\n text_latent, token_emb = self.text(text)\n text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent\n return text_latent, token_emb\n\n def encode_image(self, images, normalize=True):\n image_latent, _ = self._encode_image(images, normalize=normalize)\n return image_latent\n\n def encode_text(self, text, normalize=True, embed_cls=True):\n text_latent, _ = self._encode_text(text, normalize=normalize, embed_cls=embed_cls)\n return text_latent\n\n def forward(self, image, text, embed_cls=True, image_latent=None, image_embs=None):\n text_latent, token_embs = self._encode_text(text, embed_cls=embed_cls)\n if image_latent is None or image_embs is None:\n image_latent, image_embs = self._encode_image(image)\n\n # TODO: add assertion to avoid bugs?\n labels = text[:, -token_embs.shape[1]:]\n\n logits = self.text_decoder(image_embs, token_embs)\n return {\n \"image_features\": image_latent,\n \"text_features\": text_latent,\n \"logits\": logits,\n \"labels\": labels,\n \"logit_scale\": self.logit_scale.exp()\n }\n\n def generate(\n self,\n image,\n text=None,\n seq_len=30,\n max_seq_len=77,\n temperature=1.,\n generation_type=\"beam_search\",\n top_p=0.1, # keep tokens in the 1 - top_p quantile\n top_k=1, # keeps the top_k most probable tokens\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n repetition_penalty=1.0,\n fixed_output_length=False # if True output.shape == (batch_size, seq_len)\n ):\n # taking many ideas and components from HuggingFace GenerationMixin\n # https://huggingface.co/docs/transformers/main/en/main_classes/text_generation\n assert _has_transformers, \"Please install transformers for generate functionality. 
`pip install transformers`.\"\n assert seq_len > min_seq_len, \"seq_len must be larger than min_seq_len\"\n\n with torch.no_grad():\n sot_token_id = 49406 if sot_token_id is None else sot_token_id\n eos_token_id = 49407 if eos_token_id is None else eos_token_id\n pad_token_id = self.pad_id if pad_token_id is None else pad_token_id\n logit_processor = LogitsProcessorList(\n [\n MinLengthLogitsProcessor(min_seq_len, eos_token_id),\n RepetitionPenaltyLogitsProcessor(repetition_penalty),\n ]\n )\n\n if stopping_criteria is None:\n stopping_criteria = [MaxLengthCriteria(max_length=seq_len)]\n\n stopping_criteria = StoppingCriteriaList(\n stopping_criteria\n )\n\n device = image.device\n\n if generation_type == \"beam_search\":\n output = self._generate_beamsearch(\n image_inputs = image,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n sot_token_id=sot_token_id,\n num_beams=num_beams,\n num_beam_groups=num_beam_groups,\n min_seq_len=min_seq_len,\n stopping_criteria=stopping_criteria,\n logit_processor=logit_processor,\n )\n if fixed_output_length and output.shape[1] < seq_len:\n return torch.cat(\n (output, torch.ones(output.shape[0], seq_len-output.shape[1], device=device, dtype=output.dtype) * self.pad_id),\n dim=1\n )\n return output\n\n elif generation_type == \"top_p\":\n logit_warper = GENERATION_TYPES[generation_type](top_p)\n elif generation_type == \"top_k\":\n logit_warper = GENERATION_TYPES[generation_type](top_k)\n else:\n raise ValueError(\n f\"generation_type has to be one of \"\n f\"{'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}.\"\n )\n\n image_latent, image_embs = self._encode_image(image)\n\n if text is None:\n text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id\n\n was_training = self.training\n num_dims = len(text.shape)\n\n if num_dims == 1:\n text = text[None, :]\n\n cur_len = text.shape[1]\n self.eval()\n out = text\n\n while True:\n x = out[:, -max_seq_len:]\n cur_len = x.shape[1]\n logits = self(image, x, image_latent=image_latent, image_embs=image_embs, embed_cls=False)[\"logits\"][:, -1]\n mask = (out[:, -1] == eos_token_id) | (out[:, -1] == pad_token_id)\n sample = torch.ones((out.shape[0], 1), device=device, dtype=torch.long) * pad_token_id\n\n if mask.all():\n if not fixed_output_length:\n break\n else:\n logits = logits[~mask, :]\n filtered_logits = logit_processor(x[~mask, :], logits)\n filtered_logits = logit_warper(x[~mask, :], filtered_logits)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n\n if (cur_len + 1 == seq_len):\n sample[~mask, :] = torch.ones((sum(~mask), 1), device=device, dtype=torch.long) * eos_token_id\n else:\n sample[~mask, :] = torch.multinomial(probs, 1)\n\n out = torch.cat((out, sample), dim=-1)\n\n cur_len += 1\n\n if stopping_criteria(out, None):\n break\n\n if num_dims == 1:\n out = out.squeeze(0)\n\n self.train(was_training)\n return out\n\n def _generate_beamsearch(\n self,\n image_inputs,\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n logit_processor=None,\n logit_warper=None,\n ):\n device = image_inputs.device\n batch_size = image_inputs.shape[0]\n image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)\n image_latent, image_embs = self._encode_image(image_inputs)\n\n input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)\n input_ids = input_ids * sot_token_id\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n 
num_beams=num_beams,\n device=device,\n num_beam_groups=num_beam_groups,\n )\n # instantiate logits processors\n logits_processor = (\n LogitsProcessorList([MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)])\n if logit_processor is None\n else logit_processor\n )\n\n batch_size = len(beam_scorer._beam_hyps)\n num_beams = beam_scorer.num_beams\n num_beam_groups = beam_scorer.num_beam_groups\n num_sub_beams = num_beams // num_beam_groups\n batch_beam_size, cur_len = input_ids.shape\n beam_indices = None\n\n if num_beams * batch_size != batch_beam_size:\n raise ValueError(\n f\"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.\"\n )\n\n beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)\n # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in\n # the same group don't produce same tokens everytime.\n beam_scores[:, ::num_sub_beams] = 0\n beam_scores = beam_scores.view((batch_size * num_beams,))\n\n while True:\n\n # predicted tokens in cur_len step\n current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)\n\n # indices which will form the beams in the next time step\n reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)\n\n # do one decoder step on all beams of all sentences in batch\n model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)\n outputs = self(\n model_inputs['images'],\n model_inputs['text'],\n embed_cls=False,\n image_latent=image_latent,\n image_embs=image_embs\n )\n\n for beam_group_idx in range(num_beam_groups):\n group_start_idx = beam_group_idx * num_sub_beams\n group_end_idx = min(group_start_idx + num_sub_beams, num_beams)\n group_size = group_end_idx - group_start_idx\n\n # indices of beams of current group among all sentences in batch\n batch_group_indices = []\n\n for batch_idx in range(batch_size):\n batch_group_indices.extend(\n [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]\n )\n group_input_ids = input_ids[batch_group_indices]\n\n # select outputs of beams of currentg group only\n next_token_logits = outputs['logits'][batch_group_indices, -1, :]\n vocab_size = next_token_logits.shape[-1]\n\n next_token_scores_processed = logits_processor(\n group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx\n )\n next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)\n next_token_scores = next_token_scores.expand_as(next_token_scores_processed)\n\n # reshape for beam search\n next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)\n\n next_token_scores, next_tokens = torch.topk(\n next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True\n )\n\n next_indices = torch.div(next_tokens, vocab_size, rounding_mode=\"floor\")\n next_tokens = next_tokens % vocab_size\n\n # stateless\n process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n beam_outputs = beam_scorer.process(\n group_input_ids,\n next_token_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n beam_indices=process_beam_indices,\n )\n beam_scores[batch_group_indices] = beam_outputs[\"next_beam_scores\"]\n beam_next_tokens = beam_outputs[\"next_beam_tokens\"]\n beam_idx = beam_outputs[\"next_beam_indices\"]\n\n input_ids[batch_group_indices] = 
group_input_ids[beam_idx]\n group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)\n current_tokens[batch_group_indices] = group_input_ids[:, -1]\n\n # (beam_idx // group_size) -> batch_idx\n # (beam_idx % group_size) -> offset of idx inside the group\n reordering_indices[batch_group_indices] = (\n num_beams * torch.div(beam_idx, group_size, rounding_mode=\"floor\") + group_start_idx + (beam_idx % group_size)\n )\n\n input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)\n\n # increase cur_len\n cur_len = cur_len + 1\n if beam_scorer.is_done or stopping_criteria(input_ids, None):\n break\n\n final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n sequence_outputs = beam_scorer.finalize(\n input_ids,\n beam_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n max_length=stopping_criteria.max_length,\n beam_indices=final_beam_indices,\n )\n return sequence_outputs['sequences']" }, { "identifier": "ClipLoss", "path": "src/open_clip/loss.py", "snippet": "class ClipLoss(nn.Module):\n\n def __init__(\n self,\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__()\n self.local_loss = local_loss\n self.gather_with_grad = gather_with_grad\n self.cache_labels = cache_labels\n self.rank = rank\n self.world_size = world_size\n self.use_horovod = use_horovod\n\n # cache state\n self.prev_num_logits = 0\n self.labels = {}\n\n def get_ground_truth(self, device, num_logits) -> torch.Tensor:\n # calculated ground-truth and cache if enabled\n if self.prev_num_logits != num_logits or device not in self.labels:\n labels = torch.arange(num_logits, device=device, dtype=torch.long)\n if self.world_size > 1 and self.local_loss:\n labels = labels + num_logits * self.rank\n if self.cache_labels:\n self.labels[device] = labels\n self.prev_num_logits = num_logits\n else:\n labels = self.labels[device]\n return labels\n\n def get_logits(self, image_features, text_features, logit_scale):\n if self.world_size > 1:\n all_image_features, all_text_features = gather_features(\n image_features, text_features,\n self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)\n\n if self.local_loss:\n logits_per_image = logit_scale * image_features @ all_text_features.T\n logits_per_text = logit_scale * text_features @ all_image_features.T\n else:\n logits_per_image = logit_scale * all_image_features @ all_text_features.T\n logits_per_text = logits_per_image.T\n else:\n logits_per_image = logit_scale * image_features @ text_features.T\n logits_per_text = logit_scale * text_features @ image_features.T\n \n return logits_per_image, logits_per_text\n\n def forward(self, image_features, text_features, logit_scale, output_dict=False):\n device = image_features.device\n logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)\n\n labels = self.get_ground_truth(device, logits_per_image.shape[0])\n\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n return {\"contrastive_loss\": total_loss} if output_dict else total_loss" }, { "identifier": "DistillClipLoss", "path": "src/open_clip/loss.py", "snippet": "class DistillClipLoss(ClipLoss):\n\n def dist_loss(self, teacher_logits, student_logits):\n loss = F.kl_div(student_logits.log_softmax(dim=1),\n teacher_logits.softmax(dim=1), 
reduction='batchmean')\n return loss\n # return -(teacher_logits.softmax(dim=1) * student_logits.log_softmax(dim=1)).sum(dim=1).mean(dim=0)\n\n def forward(\n self,\n image_features,\n text_features,\n logit_scale,\n dist_image_features,\n dist_text_features,\n dist_logit_scale,\n output_dict=False,\n ):\n logits_per_image, logits_per_text = \\\n self.get_logits(image_features, text_features, logit_scale)\n\n dist_logits_per_image, dist_logits_per_text = \\\n self.get_logits(dist_image_features, dist_text_features, dist_logit_scale)\n\n labels = self.get_ground_truth(image_features.device, logits_per_image.shape[0])\n\n contrastive_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n distill_loss = (\n self.dist_loss(dist_logits_per_image, logits_per_image) +\n self.dist_loss(dist_logits_per_text, logits_per_text)\n ) / 2\n\n if output_dict:\n return {\"contrastive_loss\": contrastive_loss, \"loss_kl\": distill_loss}\n\n return contrastive_loss, distill_loss" }, { "identifier": "CoCaLoss", "path": "src/open_clip/loss.py", "snippet": "class CoCaLoss(ClipLoss):\n def __init__(\n self,\n caption_loss_weight,\n clip_loss_weight,\n pad_id=0, # pad_token for open_clip custom tokenizer\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__(\n local_loss=local_loss,\n gather_with_grad=gather_with_grad,\n cache_labels=cache_labels,\n rank=rank,\n world_size=world_size,\n use_horovod=use_horovod\n )\n\n self.clip_loss_weight = clip_loss_weight\n self.caption_loss_weight = caption_loss_weight\n self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)\n\n def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):\n clip_loss = super().forward(image_features, text_features, logit_scale)\n clip_loss = self.clip_loss_weight * clip_loss\n\n caption_loss = self.caption_loss(\n logits.permute(0, 2, 1),\n labels,\n )\n caption_loss = caption_loss * self.caption_loss_weight\n\n if output_dict:\n return {\"contrastive_loss\": clip_loss, \"caption_loss\": caption_loss}\n\n return clip_loss, caption_loss" }, { "identifier": "load_openai_model", "path": "src/open_clip/openai.py", "snippet": "def load_openai_model(\n name: str,\n precision: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = None,\n jit: bool = True,\n cache_dir: Optional[str] = None,\n):\n \"\"\"Load a CLIP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n precision: str\n Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n cache_dir : Optional[str]\n The directory to cache the downloaded model weights\n\n Returns\n -------\n model : torch.nn.Module\n The CLIP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if precision is None:\n precision = 'fp32' if device == 'cpu' else 'fp16'\n\n if get_pretrained_url(name, 'openai'):\n model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)\n elif 
os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(f\"Model {name} not found; available models = {list_openai_models()}\")\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(f\"File {model_path} is not a JIT archive. Loading as a state dict instead\")\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n # Build a non-jit model from the OpenAI jitted model state dict\n cast_dtype = get_cast_dtype(precision)\n try:\n model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)\n\n # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use\n model = model.to(device)\n if precision.startswith('amp') or precision == 'fp32':\n model.float()\n elif precision == 'bf16':\n convert_weights_to_lp(model, dtype=torch.bfloat16)\n\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 (typically for CPU)\n if precision == 'fp32':\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n model.float()\n\n # ensure image_size attr available at consistent location for both jit and non-jit\n model.visual.image_size = model.input_resolution.item()\n return model" }, { "identifier": "is_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def is_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return False\n return _clean_tag(tag) in _PRETRAINED[model]" }, { "identifier": "get_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def get_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return {}\n model_pretrained = _PRETRAINED[model]\n return model_pretrained.get(_clean_tag(tag), {})" }, { "identifier": "download_pretrained", "path": 
"src/open_clip/pretrained.py", "snippet": "def download_pretrained(\n cfg: Dict,\n force_hf_hub: bool = False,\n cache_dir: Union[str, None] = None,\n):\n target = ''\n if not cfg:\n return target\n\n download_url = cfg.get('url', '')\n download_hf_hub = cfg.get('hf_hub', '')\n if download_hf_hub and force_hf_hub:\n # use HF hub even if url exists\n download_url = ''\n\n if download_url:\n target = download_pretrained_from_url(download_url, cache_dir=cache_dir)\n elif download_hf_hub:\n has_hf_hub(True)\n # we assume the hf_hub entries in pretrained config combine model_id + filename in\n # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and\n # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.\n model_id, filename = os.path.split(download_hf_hub)\n if filename:\n target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)\n else:\n target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)\n\n return target" }, { "identifier": "list_pretrained_tags_by_model", "path": "src/open_clip/pretrained.py", "snippet": "def list_pretrained_tags_by_model(model: str):\n \"\"\" return all pretrain tags for the specified model architecture \"\"\"\n tags = []\n if model in _PRETRAINED:\n tags.extend(_PRETRAINED[model].keys())\n return tags" }, { "identifier": "download_pretrained_from_hf", "path": "src/open_clip/pretrained.py", "snippet": "def download_pretrained_from_hf(\n model_id: str,\n filename: str = 'open_clip_pytorch_model.bin',\n revision=None,\n cache_dir: Union[str, None] = None,\n):\n has_hf_hub(True)\n cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)\n return cached_file" }, { "identifier": "image_transform", "path": "src/open_clip/transform.py", "snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n resize_longest_max: bool = False,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n if isinstance(aug_cfg, dict):\n aug_cfg = AugmentationCfg(**aug_cfg)\n else:\n aug_cfg = aug_cfg or AugmentationCfg()\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n aug_cfg_dict = {k: v for k, v in asdict(aug_cfg).items() if v is not None}\n use_timm = aug_cfg_dict.pop('use_timm', False)\n if use_timm:\n from timm.data import create_transform # timm can still be optional\n if isinstance(image_size, (tuple, list)):\n assert len(image_size) >= 2\n input_size = (3,) + image_size[-2:]\n else:\n input_size = (3, image_size, image_size)\n # by default, timm aug randomly alternates bicubic & bilinear for better robustness at inference time\n aug_cfg_dict.setdefault('interpolation', 'random')\n aug_cfg_dict.setdefault('color_jitter', None) # disable by default\n train_transform = create_transform(\n input_size=input_size,\n is_training=True,\n hflip=0.,\n mean=mean,\n std=std,\n re_mode='pixel',\n **aug_cfg_dict,\n )\n else:\n train_transform = Compose([\n RandomResizedCrop(\n image_size,\n 
scale=aug_cfg_dict.pop('scale'),\n interpolation=InterpolationMode.BICUBIC,\n ),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n if aug_cfg_dict:\n warnings.warn(f'Unused augmentation cfg items, specify `use_timm` to use ({list(aug_cfg_dict.keys())}).')\n return train_transform\n else:\n if resize_longest_max:\n transforms = [\n ResizeMaxSize(image_size, fill=fill_color)\n ]\n else:\n transforms = [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ]\n transforms.extend([\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n return Compose(transforms)" }, { "identifier": "AugmentationCfg", "path": "src/open_clip/transform.py", "snippet": "class AugmentationCfg:\n scale: Tuple[float, float] = (0.9, 1.0)\n ratio: Optional[Tuple[float, float]] = None\n color_jitter: Optional[Union[float, Tuple[float, float, float]]] = None\n interpolation: Optional[str] = None\n re_prob: Optional[float] = None\n re_count: Optional[int] = None\n use_timm: bool = False" }, { "identifier": "det_image_transform", "path": "src/open_clip/transform.py", "snippet": "def det_image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n raise NotImplementedError\n else:\n transforms = [\n ResizeLongest(image_size, fill=fill_color),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n return Compose(transforms)" }, { "identifier": "HFTokenizer", "path": "src/open_clip/tokenizer.py", "snippet": "class HFTokenizer:\n \"\"\"HuggingFace tokenizer wrapper\"\"\"\n\n def __init__(self, tokenizer_name: str):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n\n def save_pretrained(self, dest):\n self.tokenizer.save_pretrained(dest)\n\n def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:\n # same cleaning as for default tokenizer, except lowercasing\n # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance\n if isinstance(texts, str):\n texts = [texts]\n texts = [whitespace_clean(basic_clean(text)) for text in texts]\n input_ids = self.tokenizer(\n texts,\n return_tensors='pt',\n max_length=context_length,\n padding='max_length',\n truncation=True,\n ).input_ids\n return input_ids" }, { "identifier": "tokenize", "path": "src/open_clip/tokenizer.py", "snippet": "def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:\n \"\"\"\n Returns the tokenized representation of given input string(s)\n\n Parameters\n ----------\n texts : Union[str, List[str]]\n An input string or a list of input strings to tokenize\n context_length : int\n The context length to use; all CLIP models use 77 as the context length\n\n Returns\n -------\n A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]\n \"\"\"\n if isinstance(texts, str):\n texts = [texts]\n\n 
sot_token = _tokenizer.encoder[\"<start_of_text>\"]\n eot_token = _tokenizer.encoder[\"<end_of_text>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n tokens = tokens[:context_length] # Truncate\n tokens[-1] = eot_token\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result" } ]
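The ClipLoss snippet in the context above reduces, in the single-process case, to a symmetric cross-entropy over scaled cosine-similarity logits. A minimal sketch with random tensors standing in for encoder outputs (batch size, embedding dim, and variable names here are illustrative, not taken from this record):

import torch
import torch.nn.functional as F

batch, dim = 8, 512
image_features = F.normalize(torch.randn(batch, dim), dim=-1)
text_features = F.normalize(torch.randn(batch, dim), dim=-1)
logit_scale = torch.tensor(1 / 0.07)  # the model stores log(1/0.07) and applies .exp() in forward

# ClipLoss.get_logits, world_size == 1 path: scaled cosine similarities in both directions.
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T

# ClipLoss.get_ground_truth: the i-th image is paired with the i-th text.
labels = torch.arange(batch)

# Symmetric cross-entropy, averaged over the two directions.
contrastive_loss = (
    F.cross_entropy(logits_per_image, labels)
    + F.cross_entropy(logits_per_text, labels)
) / 2
print(float(contrastive_loss))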
import json import logging import os import pathlib import re import torch from copy import deepcopy from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\ resize_pos_embed, get_cast_dtype from .coca_model import CoCa from .loss import ClipLoss, DistillClipLoss, CoCaLoss from .openai import load_openai_model from .pretrained import is_pretrained_cfg, get_pretrained_cfg, \ download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf from .transform import image_transform, AugmentationCfg, det_image_transform from .tokenizer import HFTokenizer, tokenize from open_clip import eva_clip from open_clip import eva_clip
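The import_statement above belongs to open_clip's factory module; the code fields that follow define create_model, get_tokenizer, and the transforms they wire together. A usage sketch under the assumption that the package is importable as open_clip and that a 'ViT-B-32' config file exists under model_configs/ (both names are placeholders, not taken from this record):

import torch
from open_clip import create_model, get_tokenizer, image_transform

# No `pretrained=` tag, so the model is built from its JSON config with random weights
# and nothing is downloaded; passing e.g. pretrained='openai' would instead route through
# load_openai_model / download_pretrained as shown in the factory code below.
model = create_model('ViT-B-32', precision='fp32', device='cpu')
tokenizer = get_tokenizer('ViT-B-32')        # returns the built-in `tokenize` unless the config names an HF tokenizer
preprocess = image_transform(224, is_train=False)

text = tokenizer(['a photo of a cat', 'a photo of a dog'])
with torch.no_grad():
    text_features = model.encode_text(text)  # shape: (2, embed_dim)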
13,630
with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.' 
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.') logging.warning(error_str) raise RuntimeError(error_str) pretrained_loaded = True elif has_hf_hub_prefix: logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) pretrained_loaded = True if require_pretrained and not pretrained_loaded: # callers of create_model_from_pretrained always expect pretrained weights raise RuntimeError( f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.') model.to(device=device) if precision in ("fp16", "bf16"): convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16) # set image / mean metadata from pretrained_cfg if available, or use default model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True if jit: model = torch.jit.script(model) return model def create_loss(args):
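The cropped_code above resolves a `pretrained` tag through the pretrained registry before downloading weights. A small sketch of querying that registry directly, assuming the module is importable as open_clip.pretrained ('ViT-B-32' is again a placeholder name):

from open_clip.pretrained import get_pretrained_cfg, list_pretrained_tags_by_model

arch = 'ViT-B-32'
tags = list_pretrained_tags_by_model(arch)   # [] if the architecture is unknown
print(tags)

if tags:
    cfg = get_pretrained_cfg(arch, tags[0])
    # download_pretrained() looks at these two keys to decide between a direct URL
    # and the Hugging Face hub.
    print(cfg.get('url', ''), cfg.get('hf_hub', ''))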
HF_HUB_PREFIX = 'hf-hub:' _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = ('.json',) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f'*{ext}')) for cf in config_files: with open(cf, 'r') as f: model_cfg = json.load(f) if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))} _rescan_model_configs() # initial populate of model config registry def list_models(): """ enumerate available model architectures based on config files """ return list(_MODEL_CONFIGS.keys()) def add_model_config(path): """ add model config path or file and update registry """ if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs() def get_model_config(model_name): if model_name in _MODEL_CONFIGS: return deepcopy(_MODEL_CONFIGS[model_name]) else: return None def get_tokenizer(model_name): if 'EVA' in model_name: return eva_clip.get_tokenizer(model_name) if model_name.startswith(HF_HUB_PREFIX): tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):]) else: config = get_model_config(model_name) tokenizer = HFTokenizer( config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize return tokenizer def load_state_dict(checkpoint_path: str, map_location='cpu'): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint if next(iter(state_dict.items()))[0].startswith('module'): state_dict = {k[7:]: v for k, v in state_dict.items()} return state_dict def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = 
config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.' 
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.') logging.warning(error_str) raise RuntimeError(error_str) pretrained_loaded = True elif has_hf_hub_prefix: logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) pretrained_loaded = True if require_pretrained and not pretrained_loaded: # callers of create_model_from_pretrained always expect pretrained weights raise RuntimeError( f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.') model.to(device=device) if precision in ("fp16", "bf16"): convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16) # set image / mean metadata from pretrained_cfg if available, or use default model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True if jit: model = torch.jit.script(model) return model def create_loss(args):
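load_state_dict in the all_code above unwraps checkpoints saved from a (Distributed)DataParallel model by stripping the 'module.' prefix. A standalone illustration with a made-up state dict:

import torch

# Toy checkpoint mimicking weights saved from a DataParallel/DistributedDataParallel
# wrapper, where every key carries a 'module.' prefix. The keys here are invented.
state_dict = {'module.visual.proj': torch.zeros(2, 2), 'module.logit_scale': torch.zeros(1)}

# Same normalisation step as load_state_dict() above: drop the 7-character 'module.' prefix.
if next(iter(state_dict.items()))[0].startswith('module'):
    state_dict = {k[7:]: v for k, v in state_dict.items()}

print(list(state_dict.keys()))  # ['visual.proj', 'logit_scale']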
return ClipLoss(
9
2023-12-09 05:43:08+00:00
16k
moonshot-admin/moonshot
third-party/tqdm-4.66.1/tqdm/tk.py
[ { "identifier": "TqdmExperimentalWarning", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "TqdmWarning", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class TqdmWarning(Warning):\n \"\"\"base class for all tqdm warnings.\n\n Used for non-external-code-breaking errors, such as garbled printing.\n \"\"\"\n def __init__(self, msg, fp_write=None, *a, **k):\n if fp_write is not None:\n fp_write(\"\\n\" + self.__class__.__name__ + \": \" + str(msg).rstrip() + '\\n')\n else:\n super(TqdmWarning, self).__init__(msg, *a, **k)" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. 
If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" } ]
import re import sys import tkinter import tkinter.ttk as ttk from warnings import warn from .std import TqdmExperimentalWarning, TqdmWarning from .std import tqdm as std_tqdm
13102
""" Tkinter GUI progressbar decorator for iterators. Usage: >>> from tqdm.tk import trange, tqdm >>> for i in trange(10): ... ... """ __author__ = {"github.com/": ["richardsheridan", "casperdcl"]} __all__ = ['tqdm_tk', 'ttkrange', 'tqdm', 'trange'] class tqdm_tk(std_tqdm): # pragma: no cover """ Experimental Tkinter GUI version of tqdm! Note: Window interactivity suffers if `tqdm_tk` is not running within a Tkinter mainloop and values are generated infrequently. In this case, consider calling `tqdm_tk.refresh()` frequently in the Tk thread. """ # TODO: @classmethod: write()? def __init__(self, *args, **kwargs): """ This class accepts the following parameters *in addition* to the parameters accepted by `tqdm`. Parameters ---------- grab : bool, optional Grab the input across all windows of the process. tk_parent : `tkinter.Wm`, optional Parent Tk window. cancel_callback : Callable, optional Create a cancel button and set `cancel_callback` to be called when the cancel or window close button is clicked. """ kwargs = kwargs.copy() kwargs['gui'] = True # convert disable = None to False kwargs['disable'] = bool(kwargs.get('disable', False)) self._warn_leave = 'leave' in kwargs grab = kwargs.pop('grab', False) tk_parent = kwargs.pop('tk_parent', None) self._cancel_callback = kwargs.pop('cancel_callback', None) super(tqdm_tk, self).__init__(*args, **kwargs) if self.disable: return if tk_parent is None: # Discover parent widget try: tk_parent = tkinter._default_root except AttributeError: raise AttributeError( "`tk_parent` required when using `tkinter.NoDefaultRoot()`") if tk_parent is None: # use new default root window as display self._tk_window = tkinter.Tk() else: # some other windows already exist self._tk_window = tkinter.Toplevel() else: self._tk_window = tkinter.Toplevel(tk_parent)
""" Tkinter GUI progressbar decorator for iterators. Usage: >>> from tqdm.tk import trange, tqdm >>> for i in trange(10): ... ... """ __author__ = {"github.com/": ["richardsheridan", "casperdcl"]} __all__ = ['tqdm_tk', 'ttkrange', 'tqdm', 'trange'] class tqdm_tk(std_tqdm): # pragma: no cover """ Experimental Tkinter GUI version of tqdm! Note: Window interactivity suffers if `tqdm_tk` is not running within a Tkinter mainloop and values are generated infrequently. In this case, consider calling `tqdm_tk.refresh()` frequently in the Tk thread. """ # TODO: @classmethod: write()? def __init__(self, *args, **kwargs): """ This class accepts the following parameters *in addition* to the parameters accepted by `tqdm`. Parameters ---------- grab : bool, optional Grab the input across all windows of the process. tk_parent : `tkinter.Wm`, optional Parent Tk window. cancel_callback : Callable, optional Create a cancel button and set `cancel_callback` to be called when the cancel or window close button is clicked. """ kwargs = kwargs.copy() kwargs['gui'] = True # convert disable = None to False kwargs['disable'] = bool(kwargs.get('disable', False)) self._warn_leave = 'leave' in kwargs grab = kwargs.pop('grab', False) tk_parent = kwargs.pop('tk_parent', None) self._cancel_callback = kwargs.pop('cancel_callback', None) super(tqdm_tk, self).__init__(*args, **kwargs) if self.disable: return if tk_parent is None: # Discover parent widget try: tk_parent = tkinter._default_root except AttributeError: raise AttributeError( "`tk_parent` required when using `tkinter.NoDefaultRoot()`") if tk_parent is None: # use new default root window as display self._tk_window = tkinter.Tk() else: # some other windows already exist self._tk_window = tkinter.Toplevel() else: self._tk_window = tkinter.Toplevel(tk_parent)
warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
0
2023-12-14 07:43:03+00:00
16k
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
14,187
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage',
10212: ChangeAvatarNameMessage,
3
2023-12-14 18:57:56+00:00
16k
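The values above complete one full example of the corpus: a list of retrieved cross-file snippets, the import block and cropped code of the file being completed, the single target line (here 10212: ChangeAvatarNameMessage,, the next entry of the messagesList dict), and bookkeeping such as the token count, creation timestamp and context-length class. As a rough, non-authoritative sketch of how such a row could be consumed, the Python below assembles one record into a (prompt, target) pair for next-line completion; the JSONL file name and the exact key names (context, import_statement, cropped_code, next_line) are assumptions read off the visible structure, not a documented loader.

import json
from typing import Dict, Tuple


def build_completion_example(row: Dict) -> Tuple[str, str]:
    """Turn one row into a (prompt, target) pair for next-line completion.

    Assumed layout, inferred from the record above: "context" is a list of
    dicts with "identifier", "path" and "snippet" keys; "import_statement"
    and "cropped_code" are strings; "next_line" is the line to predict.
    """
    # Tag every retrieved snippet with its source path so the model can
    # tell the cross-file context apart from the in-file prefix.
    context_parts = [
        f"# path: {entry['path']}\n{entry['snippet']}" for entry in row["context"]
    ]
    prompt = (
        "\n\n".join(context_parts)
        + "\n\n"
        + row["import_statement"]
        + "\n"
        + row["cropped_code"]
    )
    return prompt, row["next_line"]


if __name__ == "__main__":
    # Hypothetical file name; the dump does not say how rows are stored on disk.
    with open("repo_completion.jsonl", encoding="utf-8") as fh:
        for raw in fh:
            prompt, target = build_completion_example(json.loads(raw))
            print(len(prompt), "->", target)

A model's first generated line can then be compared against the target; the excerpt does not prescribe a metric, so exact match is only the simplest possible choice.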
pan-x-c/EE-LLM
megatron/core/models/gpt/gpt_layer_specs.py
[ { "identifier": "get_bias_dropout_add", "path": "megatron/core/fusions/fused_bias_dropout.py", "snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # different nn.functional routines to account for varying\n # dropout semantics during training and inference phases.\n if training:\n return bias_dropout_add_fused_train\n else:\n return bias_dropout_add_fused_inference\n else:\n return bias_dropout_add_unfused(training)" }, { "identifier": "FusedLayerNorm", "path": "megatron/core/fusions/fused_layer_norm.py", "snippet": "class FusedLayerNorm(torch.nn.Module):\n def __init__(\n self,\n hidden_size,\n eps=1e-5,\n persist_layer_norm=True,\n sequence_parallel=False,\n zero_centered_gamma=False,\n normalization=\"LayerNorm\",\n ):\n super().__init__()\n\n self.zero_centered_gamma = zero_centered_gamma\n self.normalization = normalization\n assert normalization == \"LayerNorm\", '({}) is not supported in ' 'FusedLayerNorm'.format(\n normalization\n )\n\n # List of hiddens sizes supported in the persistent layer norm kernel\n # If the hidden size is not supported, fall back to the non-persistent\n # kernel.\n persist_ln_hidden_sizes = [\n 1024,\n 1536,\n 2048,\n 2304,\n 3072,\n 3840,\n 4096,\n 5120,\n 6144,\n 8192,\n 10240,\n 12288,\n 12800,\n 15360,\n 16384,\n 18432,\n 20480,\n 24576,\n 25600,\n 30720,\n 32768,\n 40960,\n 49152,\n 65536,\n ]\n if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM:\n persist_layer_norm = False\n\n if not persist_layer_norm and not HAVE_FUSED_LAYER_NORM:\n # TODO: Add pytorch only layer norm\n raise ValueError(f'Apex must currently be installed to use megatron core.')\n\n if isinstance(hidden_size, numbers.Integral):\n hidden_size = (hidden_size,)\n self.hidden_size = torch.Size(hidden_size)\n self.eps = eps\n self.weight = Parameter(torch.Tensor(*hidden_size))\n self.bias = Parameter(torch.Tensor(*hidden_size))\n self.reset_parameters()\n self.persist_layer_norm = persist_layer_norm\n self.sequence_parallel = sequence_parallel\n\n # set sequence parallelism flag on weight and bias parameters\n setattr(self.weight, 'sequence_parallel', self.sequence_parallel)\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n\n def reset_parameters(self):\n\n if self.zero_centered_gamma:\n init.zeros_(self.weight)\n init.zeros_(self.bias)\n else:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n\n weight = self.weight + 1 if self.zero_centered_gamma else self.weight\n\n if self.persist_layer_norm:\n output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)\n\n # Apex's fast layer norm function outputs a 'view' tensor (i.e., has\n # a populated '_base' field). This will result in schedule.py's\n # deallocate_output_tensor() throwing an error, so a viewless tensor is\n # created to prevent this.\n output = make_viewless_tensor(\n inp=output, requires_grad=input.requires_grad, keep_graph=True\n )\n\n else:\n output = FusedLayerNormAffineFunction.apply(\n input, weight, self.bias, self.hidden_size, self.eps\n )\n\n return output" }, { "identifier": "ColumnParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class ColumnParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with column parallelism.\n\n The linear layer is defined as Y = XA + b. 
A is parallelized along\n its second dimension as A = [A_1, ..., A_p].\n\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments\n bias: If true, add bias\n gather_output: If true, call all-gather on output and make Y available\n to all GPUs, otherwise, every GPU will have its output\n which is Y_i = XA_i\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n skip_weight_param_allocation: If True, weight parameter is not allocated and must be passed\n as a keyword argument `weight` during the forward pass. Note\n that this does not affect bias, which will be allocated if\n bias is True. Defaults to False.\n is_expert: If True, the layer is treated as an MoE expert layer.\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size,\n output_size,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias=True,\n gather_output=False,\n stride=1,\n keep_master_weight_for_test=False,\n skip_bias_add=False,\n skip_weight_param_allocation: bool = False,\n is_expert: bool = False,\n ):\n super(ColumnParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.gather_output = gather_output\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.output_size_per_partition = divide(output_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.config = config\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if not skip_weight_param_allocation:\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition, self.input_size, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.output_size_per_partition,\n 0,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition,\n self.input_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=0,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.weight = None\n\n if bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(\n torch.empty(self.output_size_per_partition, dtype=config.params_dtype)\n )\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n set_tensor_model_parallel_attributes(self.bias, True, 0, stride)\n if config.perform_initialization:\n # Always 
initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.register_parameter('bias', None)\n\n self.async_tensor_model_parallel_allreduce = (\n config.async_tensor_model_parallel_allreduce and world_size > 1\n )\n\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and world_size <= 1:\n warnings.warn(\n f\"`sequence_parallel` is set to `True`, but tensor model parallel size is {world_size}. \"\n f\"Disabling sequence parallel.\"\n )\n self.sequence_parallel = False\n\n if config.gradient_accumulation_fusion and not _grad_accum_fusion_available:\n raise RuntimeError(\n \"ColumnParallelLinear was called with gradient_accumulation_fusion set \"\n \"to True but the custom CUDA extension fused_weight_gradient_mlp_cuda \"\n \"module is not found. To use gradient_accumulation_fusion you must \"\n \"install APEX with --cpp_ext and --cuda_ext. For example: \"\n \"pip install --global-option=\\\"--cpp_ext\\\" --global-option=\\\"--cuda_ext .\\\" \"\n \"Note that the extension requires CUDA>=11. Otherwise, you must turn off \"\n \"gradient accumulation fusion.\"\n )\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n\n if self.async_tensor_model_parallel_allreduce and self.sequence_parallel:\n raise RuntimeError(\n \"`async_tensor_model_parallel_allreduce` and `sequence_parallel` \"\n \"cannot be enabled at the same time.\"\n )\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None):\n \"\"\"Forward of ColumnParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n weight (optional): weight tensor to use, compulsory when\n skip_weight_param_allocation is True.\n\n Returns:\n - output\n - bias\n\n \"\"\"\n if weight is None:\n if self.weight is None:\n raise RuntimeError(\n \"weight was not supplied to ColumnParallelLinear forward pass \"\n \"and skip_weight_param_allocation is True.\"\n )\n weight = self.weight\n else:\n # Check the weight passed in is the correct shape\n expected_shape = (self.output_size_per_partition, self.input_size)\n if weight.shape != expected_shape:\n raise RuntimeError(\n f\"supplied weight's shape is {tuple(weight.shape)}, \"\n f\"not {expected_shape} as expected\"\n )\n\n bias = self.bias if not self.skip_bias_add else None\n\n if (\n self.async_tensor_model_parallel_allreduce\n or self.sequence_parallel\n or self.explicit_expert_comm\n ):\n input_parallel = input_\n else:\n input_parallel = copy_to_tensor_model_parallel_region(input_)\n\n # Matrix multiply.\n if not weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=weight,\n bias=bias,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False\n if self.explicit_expert_comm\n else self.async_tensor_model_parallel_allreduce,\n sequence_parallel=False if self.explicit_expert_comm else self.sequence_parallel,\n )\n if self.gather_output:\n # All-gather across the partitions.\n assert not self.sequence_parallel\n output = gather_from_tensor_model_parallel_region(output_parallel)\n else:\n output = output_parallel\n 
output_bias = self.bias if self.skip_bias_add else None\n return output, output_bias" }, { "identifier": "RowParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class RowParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with row parallelism.\n\n The linear layer is defined as Y = XA + b. A is parallelized along\n its first dimension and X along its second dimension as:\n - -\n | A_1 |\n | . |\n A = | . | X = [X_1, ..., X_p]\n | . |\n | A_p |\n - -\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments:\n bias: If true, add bias. Note that bias is not parallelized.\n input_is_parallel: If true, we assume that the input is already\n split across the GPUs and we do not split\n again.\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n is_expert: If True, the layer is treated as an MoE expert layer\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias: bool = True,\n input_is_parallel: bool = False,\n stride: int = 1,\n keep_master_weight_for_test: bool = False,\n skip_bias_add: bool = False,\n is_expert: bool = False,\n ):\n super(RowParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.input_is_parallel = input_is_parallel\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.input_size_per_partition = divide(input_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.config = config\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and not self.input_is_parallel:\n raise RuntimeError(\"To enable `sequence_parallel`, `input_is_parallel` must be `True`\")\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size, self.input_size_per_partition, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.input_size_per_partition,\n 1,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n params_dtype=config.params_dtype,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size,\n self.input_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=1,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n\n if 
bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(torch.empty(self.output_size, dtype=config.params_dtype))\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n\n if config.perform_initialization:\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n else:\n self.register_parameter('bias', None)\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_):\n \"\"\"Forward of RowParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n Returns:\n - output\n - bias\n \"\"\"\n # Set up backprop all-reduce.\n if self.input_is_parallel:\n input_parallel = input_\n else:\n assert not self.sequence_parallel\n input_parallel = scatter_to_tensor_model_parallel_region(input_)\n # Matrix multiply.\n if not self.weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=self.weight,\n bias=None,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False,\n sequence_parallel=False,\n )\n\n # All-reduce across all the partitions.\n if self.explicit_expert_comm:\n assert self.skip_bias_add\n output_ = output_parallel\n elif self.sequence_parallel:\n output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)\n else:\n output_ = reduce_from_tensor_model_parallel_region(output_parallel)\n if not self.skip_bias_add:\n output = (output_ + self.bias) if self.bias is not None else output_\n output_bias = None\n else:\n output = output_\n output_bias = self.bias\n return output, output_bias" }, { "identifier": "SelfAttention", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttention(Attention):\n \"\"\"Self-attention layer class\n\n Self-attention layer takes input with size [s, b, h]\n and returns output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: SelfAttentionSubmodules,\n layer_number: int = 1,\n attn_mask_type=AttnMaskType.padding,\n **kwargs,\n ):\n super().__init__(\n config=config,\n submodules=submodules,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type,\n **kwargs,\n )\n\n self.linear_qkv = build_module(\n submodules.linear_qkv,\n self.config.hidden_size,\n self.query_projection_size + 2 * self.kv_projection_size,\n config=self.config,\n init_method=self.config.init_method,\n bias=self.config.add_bias_linear,\n skip_bias_add=False,\n )\n\n def get_query_key_value_tensors(self, hidden_states, key_value_states=None):\n \"\"\"\n Derives `query`, `key` and `value` tensors from `hidden_states`.\n \"\"\"\n # Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn)]\n mixed_qkv, _ = self.linear_qkv(hidden_states)\n\n # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]\n new_tensor_shape = mixed_qkv.size()[:-1] + (\n self.num_query_groups_per_partition,\n (\n (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)\n * self.hidden_size_per_attention_head\n ),\n )\n mixed_qkv = mixed_qkv.view(*new_tensor_shape)\n\n # [sq, 
b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]\n (query, key, value) = torch.split(\n mixed_qkv,\n [\n (\n self.num_attention_heads_per_partition\n // self.num_query_groups_per_partition\n * self.hidden_size_per_attention_head\n ),\n self.hidden_size_per_attention_head,\n self.hidden_size_per_attention_head,\n ],\n dim=3,\n )\n # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]\n query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)\n\n return query, key, value" }, { "identifier": "SelfAttentionSubmodules", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttentionSubmodules:\n linear_qkv: Union[ModuleSpec, type] = None\n dot_product_attention: Union[ModuleSpec, type] = None\n linear_proj: Union[ModuleSpec, type] = None" }, { "identifier": "TEDotProductAttention", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TEDotProductAttention(te.pytorch.DotProductAttention):\n \"\"\"\n Wrapper for the Transformer-Engine's `DotProductAttention` layer that also\n has \"flash attention\" enabled.\n\n Note that if Megatron's parallel_state has not been initialized yet, the\n tp_group and cp_group passed to TE will be None and must be set later\n via set_tensor_parallel_group() and set_context_parallel_group().\n \"\"\"\n\n cp_stream: torch.cuda.Stream = None\n\n def __init__(\n self,\n config: TransformerConfig,\n layer_number: int = 1,\n attn_mask_type: AttnMaskType = AttnMaskType.padding,\n **kwargs\n ):\n self.config = config\n\n # Only Transformer-Engine version > 0.13.0 supports context parallelism\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version > packaging.version.Version(\"0.13.0\"):\n if getattr(TEDotProductAttention, \"cp_stream\") is None:\n TEDotProductAttention.cp_stream = torch.cuda.Stream()\n kwargs[\"cp_group\"] = get_context_parallel_group(check_initialized=False)\n kwargs[\"cp_global_ranks\"] = get_context_parallel_global_ranks(check_initialized=False)\n kwargs[\"cp_stream\"] = TEDotProductAttention.cp_stream\n else:\n assert (\n self.config.context_parallel_size == 1\n ), \"Only Transformer-Engine version > 0.13.0 supports context parallelism\"\n\n super().__init__(\n num_attention_heads=self.config.num_attention_heads,\n kv_channels=self.config.kv_channels,\n attention_dropout=self.config.attention_dropout,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type.name,\n sequence_parallel=self.config.sequence_parallel,\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n **kwargs,\n )" }, { "identifier": "TELayerNormColumnParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines\n layernorm and linear layers\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n config: TransformerConfig,\n init_method: Callable,\n bias: bool,\n skip_bias_add: bool,\n **kwargs\n ):\n self.config = config\n # TE returns a zero length Tensor when bias=False and\n # return_bias=True, but we prefer None. So in that case we\n # tell TE to not return the bias, and return None\n # ourselves. 
This way our forward always returns two values\n # and we don't have to deal with the zero length Tensor.\n self.te_return_bias = skip_bias_add and bias\n\n # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm`\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version >= packaging.version.Version(\"0.11.0\"):\n kwargs[\"normalization\"] = self.config.normalization\n\n super().__init__(\n in_features=input_size,\n out_features=output_size,\n bias=bias,\n sequence_parallel=self.config.sequence_parallel,\n fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n init_method=init_method,\n params_dtype=self.config.params_dtype,\n parallel_mode=\"column\",\n return_bias=self.te_return_bias,\n **_get_extra_te_kwargs(config),\n )\n\n def forward(self, x):\n out = super().forward(x)\n\n # TE only returns a tuple when return_bias is True, otherwise\n # it returns a single Tensor, we always want to return two\n # values regardless of the arguments.\n if self.te_return_bias:\n return out\n return out, None" }, { "identifier": "TERowParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TERowParallelLinear(TELinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `Linear` layer but specialized similar\n to megatron's `RowParallelLinear` layer.\n \"\"\"\n\n def __init__(self, input_size: int, output_size: int, config: TransformerConfig, **kwargs):\n self.config = config\n super().__init__(\n input_size=input_size,\n output_size=output_size,\n config=self.config,\n parallel_mode=\"row\",\n **kwargs,\n )" }, { "identifier": "DotProductAttention", "path": "megatron/core/transformer/dot_product_attention.py", "snippet": "class DotProductAttention(MegatronModule):\n \"\"\"\n Region where selective activation recomputation is applied.\n This region is memory intensive but less compute intensive which\n makes activation checkpointing more efficient for LLMs (20B+).\n See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.\n\n We use the following notation:\n h: hidden size\n n: number of attention heads\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, layer_number: int = 1, attn_mask_type=AttnMaskType.padding\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n assert (\n self.config.context_parallel_size == 1\n ), \"Context parallelism is only supported by TEDotProductAttention!\"\n\n self.layer_number = max(1, layer_number)\n self.attn_mask_type = attn_mask_type\n\n projection_size = self.config.kv_channels * config.num_attention_heads\n\n # Per attention head and per partition values.\n world_size = parallel_state.get_tensor_model_parallel_world_size()\n self.hidden_size_per_partition = divide(projection_size, world_size)\n self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)\n self.num_attention_heads_per_partition = divide(config.num_attention_heads, world_size)\n\n coeff = None\n self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n if self.config.apply_query_key_layer_scaling:\n coeff = self.layer_number\n self.norm_factor *= coeff\n\n self.scale_mask_softmax = FusedScaleMaskSoftmax(\n 
input_in_fp16=self.config.fp16,\n input_in_bf16=self.config.bf16,\n attn_mask_type=self.attn_mask_type,\n scaled_masked_softmax_fusion=self.config.masked_softmax_fusion,\n mask_func=attention_mask_func,\n softmax_in_fp32=self.config.attention_softmax_in_fp32,\n scale=coeff,\n )\n\n # Dropout. Note that for a single iteration, this layer will generate\n # different outputs on different number of parallel partitions but\n # on average it should not be partition dependent.\n self.attention_dropout = torch.nn.Dropout(self.config.attention_dropout)\n\n def forward(\n self, query_layer: Tensor, key_layer: Tensor, value_layer: Tensor, attention_mask: Tensor\n ):\n\n # ===================================\n # Raw attention scores. [b, n/p, s, s]\n # ===================================\n\n # [b, np, sq, sk]\n output_size = (\n query_layer.size(1),\n query_layer.size(2),\n query_layer.size(0),\n key_layer.size(0),\n )\n\n # [sq, b, np, hn] -> [sq, b * np, hn]\n # This will be a simple view when doing normal attention, but in group query attention\n # the key and value tensors are repeated to match the queries so you can't use simple strides\n # to extract the queries.\n query_layer = query_layer.reshape(output_size[2], output_size[0] * output_size[1], -1)\n # [sk, b, np, hn] -> [sk, b * np, hn]\n key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n # preallocting input tensor: [b * np, sq, sk]\n matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor(\n (output_size[0] * output_size[1], output_size[2], output_size[3]),\n query_layer.dtype,\n \"mpu\",\n )\n\n # Raw attention scores. [b * np, sq, sk]\n matmul_result = torch.baddbmm(\n matmul_input_buffer,\n query_layer.transpose(0, 1), # [b * np, sq, hn]\n key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]\n beta=0.0,\n alpha=(1.0 / self.norm_factor),\n )\n\n # change view to [b, np, sq, sk]\n attention_scores = matmul_result.view(*output_size)\n\n # ===========================\n # Attention probs and dropout\n # ===========================\n\n # attention scores and attention mask [b, np, sq, sk]\n attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n\n if not self.config.sequence_parallel:\n with tensor_parallel.get_cuda_rng_tracker().fork():\n attention_probs = self.attention_dropout(attention_probs)\n else:\n attention_probs = self.attention_dropout(attention_probs)\n\n # =========================\n # Context layer. 
[sq, b, hp]\n # =========================\n\n # value_layer -> context layer.\n # [sk, b, np, hn] --> [b, np, sq, hn]\n\n # context layer shape: [b, np, sq, hn]\n output_size = (\n value_layer.size(1),\n value_layer.size(2),\n query_layer.size(0),\n value_layer.size(3),\n )\n\n # change view [sk, b * np, hn]\n value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n\n # change view [b * np, sq, sk]\n attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n\n # matmul: [b * np, sq, hn]\n context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n\n # change view [b, np, sq, hn]\n context_layer = context_layer.view(*output_size)\n\n # [b, np, sq, hn] --> [sq, b, np, hn]\n context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n\n # [sq, b, np, hn] --> [sq, b, hp]\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n return context_layer" }, { "identifier": "AttnMaskType", "path": "megatron/core/transformer/enums.py", "snippet": "class AttnMaskType(enum.Enum):\n padding = 1\n causal = 2" }, { "identifier": "MLP", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLP(MegatronModule):\n \"\"\"\n MLP will take the input with h hidden state, project it to 4*h\n hidden dimension, perform nonlinear transformation, and project the\n state back into h hidden dimension.\n\n\n Returns an output and a bias to be added to the output.\n If config.add_bias_linear is False, the bias returned is None.\n\n We use the following notation:\n h: hidden size\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, submodules: MLPSubmodules, is_expert: bool = False\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n # If this is a gated linear unit we double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n ffn_hidden_size = self.config.ffn_hidden_size\n if self.config.gated_linear_unit:\n ffn_hidden_size *= 2\n\n self.linear_fc1 = build_module(\n submodules.linear_fc1,\n self.config.hidden_size,\n ffn_hidden_size,\n config=self.config,\n init_method=self.config.init_method,\n gather_output=False,\n bias=self.config.add_bias_linear,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n if self.config.gated_linear_unit:\n\n def glu(x):\n x = torch.chunk(x, 2, dim=-1)\n return self.config.activation_func(x[0]) * x[1]\n\n self.activation_func = glu\n else:\n self.activation_func = self.config.activation_func\n\n self.linear_fc2 = build_module(\n submodules.linear_fc2,\n self.config.ffn_hidden_size,\n self.config.hidden_size,\n config=self.config,\n init_method=self.config.output_layer_init_method,\n bias=self.config.add_bias_linear,\n input_is_parallel=True,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n def forward(self, hidden_states):\n\n # [s, b, 4 * h/p]\n intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states)\n\n if self.config.bias_gelu_fusion:\n assert self.config.add_bias_linear is True\n assert self.activation_func == F.gelu\n intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel)\n else:\n if bias_parallel is not None:\n intermediate_parallel = intermediate_parallel + bias_parallel\n intermediate_parallel = self.activation_func(intermediate_parallel)\n\n # [s, b, h]\n output, output_bias = 
self.linear_fc2(intermediate_parallel)\n\n return output, output_bias" }, { "identifier": "MLPSubmodules", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLPSubmodules:\n linear_fc1: Union[ModuleSpec, type] = None\n linear_fc2: Union[ModuleSpec, type] = None" }, { "identifier": "ModuleSpec", "path": "megatron/core/transformer/spec_utils.py", "snippet": "class ModuleSpec:\n \"\"\"This is a Module Specification dataclass.\n\n Specification defines the location of the module (to import dynamically)\n or the imported module itself. It also defines the params that need to be\n passed to initialize the module.\n\n Args:\n module (Union[Tuple, type]): A tuple describing the location of the\n module class e.g. `(module.location, ModuleClass)` or the imported\n module class itself e.g. `ModuleClass` (which is already imported\n using `from module.location import ModuleClass`).\n params (dict): A dictionary of params that need to be passed while init.\n\n \"\"\"\n\n module: Union[Tuple, type]\n params: dict = field(default_factory=lambda: {})\n submodules: type = None" }, { "identifier": "SwitchMLP", "path": "megatron/core/transformer/switch_mlp.py", "snippet": "class SwitchMLP(MegatronModule):\n \"\"\"\n Top-1 Mixture of Experts Layer. Routes input to one of N MLP \"experts\"\n Curently supports Sinkhorn based expert routing.\n \"\"\"\n\n def __init__(self, config: TransformerConfig, submodules: MLPSubmodules):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n self.router = torch.nn.Linear(self.config.hidden_size, self.config.num_moe_experts)\n self.add_bias = config.add_bias_linear\n self.sequence_parallel = config.sequence_parallel\n self.route_algo = sinkhorn\n self.router_activation = torch.sigmoid\n self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size()\n\n assert self.config.num_moe_experts % self.expert_parallel_size == 0\n self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size\n local_expert_indices_offset = (\n parallel_state.get_expert_model_parallel_rank() * self.num_local_experts\n )\n self.local_expert_indices = [\n local_expert_indices_offset + i for i in range(self.num_local_experts)\n ]\n\n self.local_experts = torch.nn.ModuleList()\n for _ in range(self.num_local_experts):\n expert = MLP(self.config, submodules, is_expert=True)\n self.local_experts.append(expert)\n\n def gather_indices(self, local_indices):\n \"\"\" Gather tensors and concatenate along the first dimension.\"\"\"\n group = get_tensor_and_expert_parallel_group()\n world_size = torch.distributed.get_world_size(group=group)\n # Bypass the function if we are using only 1 GPU.\n if world_size == 1:\n return local_indices\n\n dim_size = list(local_indices.size())\n dim_size[0] = dim_size[0] * world_size\n\n # TODO pre allocate memory\n output = torch.empty(\n dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device()\n )\n torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group)\n return output\n\n def forward(self, hidden_states):\n hidden_shape = hidden_states.shape\n route = self.router(hidden_states)\n route = route.view(-1, self.config.num_moe_experts)\n\n if self.training:\n with torch.no_grad():\n norm_route = self.route_algo(\n route.detach().to(dtype=torch.float32)\n ) # explicit fp32 conversion for stability\n _, max_ind = torch.max(norm_route, dim=1)\n route = self.router_activation(route)\n max_prob = route[torch.arange(route.size(0)), max_ind]\n else:\n route = 
self.router_activation(route)\n max_prob, max_ind = torch.max(route, dim=1)\n\n max_prob = torch.unsqueeze(max_prob, 1)\n hidden_states = hidden_states.view(-1, hidden_shape[-1])\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe(\n hidden_states\n )\n global_indices = self.gather_indices(max_ind)\n else:\n global_hidden_states = hidden_states\n global_indices = max_ind\n\n output_total = torch.zeros_like(global_hidden_states)\n if self.add_bias:\n output_bias_total = torch.zeros_like(global_hidden_states)\n\n for expert_num, expert in enumerate(self.local_experts):\n local_expert_index = self.local_expert_indices[expert_num]\n local_indices = (global_indices == local_expert_index).nonzero()\n hidden = global_hidden_states[local_indices, :]\n output, output_bias = expert(hidden)\n\n output_total[local_indices, :] = output\n if self.add_bias:\n output_bias = output_bias.expand_as(output)\n output_bias_total[local_indices, :] = output_bias\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_total\n )\n if self.add_bias:\n output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_bias_total\n )\n # bias is duplicated across tensor parallelism ranks;\n # reduce scatter reduces bias across tensor parallel_ranks\n output_bias_total = (\n output_bias_total / parallel_state.get_tensor_model_parallel_world_size()\n )\n\n output_total = output_total * max_prob\n output_total = output_total.view(hidden_shape)\n if self.add_bias:\n output_bias_total = output_bias_total * max_prob\n output_bias_total = output_bias_total.view(hidden_shape)\n else:\n output_bias_total = None\n\n return output_total, output_bias_total" }, { "identifier": "TransformerLayer", "path": "megatron/core/transformer/transformer_layer.py", "snippet": "class TransformerLayer(MegatronModule):\n \"\"\"A single transformer layer.\n\n Transformer layer takes input with size [s, b, h] and returns an\n output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: TransformerLayerSubmodules,\n layer_number: int = 1,\n self_attn_mask_type=AttnMaskType.padding,\n ):\n super().__init__(config=config)\n self.config: TransformerConfig = config\n\n self.layer_number = layer_number + self._get_layer_offset()\n\n self.self_attn_mask_type = self_attn_mask_type\n\n ## [Module 1: Input Layernorm] Optional Layernorm on the input data\n # TODO: add pytorch only layernorm\n self.input_layernorm = build_module(\n submodules.input_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 2: SelfAttention]\n self.self_attention = build_module(\n submodules.self_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 3: BiasDropoutFusion]\n self.self_attn_bda = build_module(submodules.self_attn_bda)\n\n ## [Module 4: Post SelfAttention] Optional Layernorm after self-attn\n self.pre_cross_attn_layernorm = build_module(\n submodules.pre_cross_attn_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n 
sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 5: CrossAttention]\n self.cross_attention = build_module(\n submodules.cross_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 6: BiasDropoutFusion]\n self.cross_attn_bda = build_module(submodules.cross_attn_bda)\n\n ## [Module 7: Post Cross Attention] Optional Layernorm after cross-attn\n self.pre_mlp_layernorm = build_module(\n submodules.pre_mlp_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 8: MLP block]\n # TODO how to set the gpt_layer_spec.py when we have moe_frequency > 1,\n # where MLP and SwitchMLP both appear alternately?\n self.mlp = build_module(submodules.mlp, config=self.config)\n\n ## [Module 9: BiasDropoutFusion]\n self.mlp_bda = build_module(submodules.mlp_bda)\n\n # @jcasper how should we handle nvfuser?\n # Set bias+dropout+add fusion grad_enable execution handler.\n # TORCH_MAJOR = int(torch.__version__.split('.')[0])\n # TORCH_MINOR = int(torch.__version__.split('.')[1])\n # use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)\n # self.bias_dropout_add_exec_handler = nullcontext if use_nvfuser else torch.enable_grad\n self.bias_dropout_add_exec_handler = torch.enable_grad\n\n def _get_layer_offset(self):\n\n pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()\n\n num_layers_per_pipeline_rank = (\n self.config.num_layers // parallel_state.get_pipeline_model_parallel_world_size()\n )\n\n if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:\n vp_rank = parallel_state.get_virtual_pipeline_model_parallel_rank()\n vp_size = parallel_state.get_virtual_pipeline_model_parallel_world_size()\n\n total_num_layers = self.config.num_layers\n num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size\n total_virtual_chunks = total_num_layers // vp_size\n offset = vp_rank * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank)\n\n else:\n # Each stage gets a contiguous set of layers.\n if parallel_state.get_pipeline_model_parallel_world_size() > 1:\n offset = pipeline_rank * num_layers_per_pipeline_rank\n else:\n offset = 0\n\n return offset\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n context=None,\n context_mask=None,\n inference_params=None,\n rotary_pos_emb=None,\n ):\n # hidden_states: [s, b, h]\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Input Layer norm\n input_layernorm_output = self.input_layernorm(hidden_states)\n\n # Self attention.\n attention_output_with_bias = self.self_attention(\n input_layernorm_output,\n attention_mask=attention_mask,\n inference_params=inference_params,\n rotary_pos_emb=rotary_pos_emb,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.self_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm after self-attention\n pre_cross_attn_layernorm_output = 
self.pre_cross_attn_layernorm(hidden_states)\n\n # Cross attention.\n attention_output_with_bias = self.cross_attention(\n pre_cross_attn_layernorm_output,\n attention_mask=attention_mask,\n context=context,\n inference_params=inference_params,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.cross_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm post the cross-attention.\n pre_mlp_layernorm_output = self.pre_mlp_layernorm(hidden_states)\n\n # MLP.\n mlp_output_with_bias = self.mlp(pre_mlp_layernorm_output)\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.mlp_bda(self.training, self.config.bias_dropout_fusion)(\n mlp_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Jit compiled function creates 'view' tensor. This tensor\n # potentially gets saved in the MPU checkpoint function context,\n # which rejects view tensors. While making a viewless tensor here\n # won't result in memory savings (like the data loader, or\n # p2p_communication), it serves to document the origin of this\n # 'view' tensor.\n output = make_viewless_tensor(\n inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True\n )\n\n return output\n\n def sharded_state_dict(self, prefix=''):\n\n # state_dict = self.state_dict(prefix=prefix, keep_vars=True)\n state_dict = self.state_dict(keep_vars=True)\n\n tensor_parallel_layers_axis_map = {\n 'self_attention.linear_qkv.weight': 0,\n 'self_attention.linear_qkv.bias': 0,\n 'self_attention.linear_proj.weight': 1,\n 'mlp.linear_fc1.weight': 0,\n 'mlp.linear_fc1.bias': 0,\n 'mlp.linear_fc2.weight': 1,\n }\n\n offset = self._get_layer_offset()\n num_layers = self.config.num_layers\n\n sharded_state_dict = {}\n\n for layer_name in state_dict.keys():\n tensor = state_dict[layer_name]\n global_layer_offset = self.layer_number - 1 # self.layer_number starts at 1\n layer_key = f'{prefix}{global_layer_offset - offset}.{layer_name}' # module list index in TransformerBlock\n sharded_offsets = [(0, global_layer_offset, num_layers)] # PP sharding\n\n if layer_name in tensor_parallel_layers_axis_map:\n tp_axis = tensor_parallel_layers_axis_map[layer_name]\n # TP sharding\n sharded_offsets.append(\n [\n tp_axis + 1, # +1 for PP dimension\n parallel_state.get_tensor_model_parallel_rank(),\n parallel_state.get_tensor_model_parallel_world_size(),\n ]\n )\n replica_id = parallel_state.get_data_parallel_rank()\n else:\n replica_id = (\n parallel_state.get_data_parallel_rank()\n * parallel_state.get_data_parallel_world_size()\n + parallel_state.get_tensor_model_parallel_rank()\n )\n\n if layer_name.endswith('._extra_state'):\n sharded_state_dict[layer_key] = ShardedObject(\n f'{prefix}{layer_name}',\n tensor,\n (num_layers,),\n (global_layer_offset,),\n replica_id,\n )\n\n else:\n sharded_state_dict[layer_key] = ShardedTensor.from_rank_offsets(\n f'{prefix}{layer_name}',\n tensor,\n *sharded_offsets,\n replica_id=replica_id,\n prepend_axis_num=1, # for PP sharding\n )\n\n return sharded_state_dict" }, { "identifier": "TransformerLayerSubmodules", "path": "megatron/core/transformer/transformer_layer.py", 
"snippet": "class TransformerLayerSubmodules:\n input_layernorm: Union[ModuleSpec, type] = IdentityOp\n self_attention: Union[ModuleSpec, type] = IdentityOp\n self_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_cross_attn_layernorm: Union[ModuleSpec, type] = IdentityOp\n cross_attention: Union[ModuleSpec, type] = IdentityOp\n cross_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_mlp_layernorm: Union[ModuleSpec, type] = IdentityOp\n mlp: Union[ModuleSpec, type] = IdentityOp\n mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp" } ]
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
12,878
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec(
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec(
module=SelfAttention,
4
2023-12-07 08:29:38+00:00
16k
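In the example that ends here, the index 4 recorded after the target line appears to be a 0-based position in the context list: entry 4 of that list is the SelfAttention snippet, and the target line is module=SelfAttention,, i.e. exactly the identifier that snippet defines. A minimal sketch under that assumption follows; it is an inference from the dump rather than documented behaviour, and the key names are again taken from the visible structure.

from typing import Dict, Optional


def gold_context_entry(row: Dict) -> Optional[Dict]:
    """Return the context entry the row marks as most relevant.

    Assumption (inferred, not documented): "gold_snippet_index" is a
    0-based index into the "context" list, as in the row above where
    index 4 selects the SelfAttention snippet.
    """
    idx = row.get("gold_snippet_index", -1)
    context = row.get("context", [])
    if 0 <= idx < len(context):
        return context[idx]
    return None


def gold_identifier_hit(row: Dict) -> bool:
    """Sanity check: does the gold snippet's identifier occur in next_line?"""
    entry = gold_context_entry(row)
    return entry is not None and entry["identifier"] in row["next_line"]

For the row directly above this check returns True; whether that holds for every row in the dataset cannot be told from this excerpt.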
tommy-xq/SA2VP
vit_train_swin.py
[ { "identifier": "create_optimizer", "path": "optim_factory.py", "snippet": "def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if skip_list is not None:\n skip = skip_list\n elif hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)\n weight_decay = 0.\n else:\n parameters = model.parameters()\n\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp':\n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp':\n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n # elif opt_lower == 'novograd':\n # optimizer = NovoGrad(parameters, **opt_args)\n # elif opt_lower == 'nvnovograd':\n # optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, **opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "get_parameter_groups", "path": "optim_factory.py", "snippet": "def 
get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if get_num_layer is not None:\n layer_id = get_num_layer(name)\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if get_layer_scale is not None:\n scale = get_layer_scale(layer_id)\n else:\n scale = 1.\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n return list(parameter_group_vars.values())" }, { "identifier": "LayerDecayValueAssigner", "path": "optim_factory.py", "snippet": "class LayerDecayValueAssigner(object):\n def __init__(self, values):\n self.values = values\n\n def get_scale(self, layer_id):\n return self.values[layer_id]\n\n def get_layer_id(self, var_name):\n return get_num_layer_for_vit(var_name, len(self.values))" }, { "identifier": "build_dataset", "path": "datasets.py", "snippet": "def build_dataset(is_train, args):\n # must choose one\n transform = build_transform_vtab(is_train, args)\n # transform = build_transform_fgvc(is_train, args)\n \n prefix_fgvc = './data/fgvc' # replace yours, sample:'./data/fgvc'\n prefix_vtab = './data/vtab-1k' # replace yours, sample:'./data/vtab-1k'\n \n if args.data_set == 'CIFAR_ori':\n dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n root = os.path.join(args.data_path, 'train' if is_train else 'test')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == \"image_folder\":\n root = args.data_path if is_train else args.eval_data_path\n dataset = ImageFolder(root, transform=transform)\n nb_classes = args.nb_classes\n assert len(dataset.class_to_idx) == nb_classes\n elif args.data_set == 'CUB':\n if is_train:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 200\n elif args.data_set == 'DOG':\n if is_train:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 120\n elif args.data_set == 'FLOWER':\n if is_train:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CAR':\n if is_train:\n dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=True, transform=transform)\n else:\n 
dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 196\n elif args.data_set == 'BIRD':\n if is_train:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 555\n elif args.data_set == 'CAL101':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=True, transform=transform) # VTAB_attnmap\n else:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CIFAR':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 100\n elif args.data_set == 'PATCH_CAMELYON':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 2\n elif args.data_set == 'EUROSAT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'DMLAB':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'CLEVR_COUNT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 8\n elif args.data_set == 'DTD':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 47\n elif args.data_set == 'FLOWER_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'PET':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 37\n elif args.data_set == 'SVHN_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'SUN':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 397\n elif args.data_set == 
'Resisc45':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 45\n elif args.data_set == 'Retinopathy':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 5\n elif args.data_set == 'CLEVR_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'KITTI_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 4\n elif args.data_set == 'DS_LOC':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'DS_ORI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'SN_AZI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 18\n elif args.data_set == 'SN_ELE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 9\n elif args.data_set == 'DTD_DAM':\n if is_train:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"train\", transform=transform) # note: remember to change data path.\n else:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"test\", transform=transform) # note: use 'val' to find best and then 'test'. 
when training, use 'val'.\n nb_classes = 47\n elif args.data_set == 'GTSRB_DAM':\n if is_train:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 43\n elif args.data_set == 'FOOD_DAM':\n if is_train:\n dataset = Food101(root='/data/data', split=\"train\", transform=transform)\n else:\n dataset = Food101(root='/data/data', split=\"test\", transform=transform)\n nb_classes = 101\n elif args.data_set == 'CIFAR10_DAM':\n if is_train:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"val\", transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100_DAM':\n if is_train:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 100\n elif args.data_set == 'SVHN_DAM':\n if is_train:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"train\", transform=transform)\n else:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"test\", transform=transform)\n nb_classes = 10\n else:\n raise NotImplementedError()\n assert nb_classes == args.nb_classes\n print(\"Number of the class = %d\" % args.nb_classes)\n\n return dataset, nb_classes" }, { "identifier": "build_beit_pretraining_dataset", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset(args):\n transform = DataAugmentationForBEiT(args)\n print(\"Data Aug = %s\" % str(transform))\n return ImageFolder(args.data_path, transform=transform)" }, { "identifier": "build_beit_pretraining_dataset_val", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset_val(args):\n transform = DataAugmentationForBEiT_val(args)\n return ImageFolder('/data/fgvc_deal/cub/test', transform=transform)" }, { "identifier": "train_one_epoch", "path": "engine_for_train.py", "snippet": "def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,\n start_steps=None, lr_schedule_values=None, wd_schedule_values=None,\n num_training_steps_per_epoch=None, update_freq=None):\n model.train(True)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n if loss_scaler is None:\n model.zero_grad()\n model.micro_steps = 0\n else:\n optimizer.zero_grad()\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n step = data_iter_step // update_freq\n if step >= num_training_steps_per_epoch:\n continue\n it = start_steps + step # global training iteration\n # Update LR & WD for the first acc\n if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n if lr_schedule_values is not None:\n param_group[\"lr\"] = lr_schedule_values[it] * param_group[\"lr_scale\"]\n if wd_schedule_values is not None and 
param_group[\"weight_decay\"] > 0:\n param_group[\"weight_decay\"] = wd_schedule_values[it]\n # print(samples)\n samples = samples.to(device, non_blocking=True)\n # images = images.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n if loss_scaler is None:\n samples = samples.half()\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2, device\n else:\n with torch.cuda.amp.autocast():\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n if loss_scaler is None:\n loss /= update_freq\n model.backward(loss)\n model.step()\n\n if (data_iter_step + 1) % update_freq == 0:\n # model.zero_grad()\n # Deepspeed will call step() & model.zero_grad() automatic\n if model_ema is not None:\n model_ema.update(model)\n grad_norm = None\n loss_scale_value = get_loss_scale_for_deepspeed(model)\n else:\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss /= update_freq\n grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order,\n update_grad=(data_iter_step + 1) % update_freq == 0)\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n loss_scale_value = loss_scaler.state_dict()[\"scale\"]\n\n torch.cuda.synchronize()\n\n if mixup_fn is None:\n class_acc = (output.max(-1)[-1] == targets).float().mean()\n else:\n class_acc = None\n metric_logger.update(loss=loss_value)\n metric_logger.update(class_acc=class_acc)\n metric_logger.update(loss_scale=loss_scale_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n metric_logger.update(min_lr=min_lr)\n weight_decay_value = None\n for group in optimizer.param_groups:\n if group[\"weight_decay\"] > 0:\n weight_decay_value = group[\"weight_decay\"]\n metric_logger.update(weight_decay=weight_decay_value)\n metric_logger.update(grad_norm=grad_norm)\n\n if log_writer is not None:\n log_writer.update(loss=loss_value, head=\"loss\")\n log_writer.update(class_acc=class_acc, head=\"loss\")\n log_writer.update(loss_scale=loss_scale_value, head=\"opt\")\n log_writer.update(lr=max_lr, head=\"opt\")\n log_writer.update(min_lr=min_lr, head=\"opt\")\n log_writer.update(weight_decay=weight_decay_value, head=\"opt\")\n log_writer.update(grad_norm=grad_norm, head=\"opt\")\n\n log_writer.set_step()\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "evaluate", "path": "engine_for_train.py", "snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n \n for batch in metric_logger.log_every(data_loader, 10, header):\n # samples, images = bs\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n # samples = 
samples.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n \n # compute output\n \n with torch.cuda.amp.autocast():\n output, prompt = model(images)\n loss = 0.8*criterion(output, target)+0.2*criterion(prompt, target)\n \n # acc1, acc5 = accuracy(output, target, topk=(1, 5))\n acc1 = accuracy(output, target, topk=(1, 5))[0]\n\n batch_size = target.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n \n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "NativeScalerWithGradNormCount", "path": "utils.py", "snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)" }, { "identifier": "_build_swin_model", "path": "vpt_main/src/models/build_swin_backbone.py", "snippet": "def _build_swin_model(model_type, crop_size, model_root):\n if model_type == \"swint_imagenet\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.2,\n num_classes=-1, # setting to a negative value will make head as identity\n )\n embed_dim = 96\n num_layers = 4\n elif model_type == \"swint_imagenet_ssl\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.2,\n num_classes=-1,\n )\n embed_dim = 96\n num_layers = 4\n\n elif model_type == \"swins_imagenet\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 18, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.3,\n num_classes=-1,\n )\n embed_dim = 96\n num_layers = 4\n elif model_type == \"swinb_imagenet_224\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=7,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinb_imagenet_384\":\n model = SwinTransformer(\n img_size=384,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=12,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n\n elif model_type == \"swinb_imagenet22k_224\":\n model = SwinTransformer(\n img_size=crop_size,\n 
embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=7,\n drop_path_rate=0.2, # try to from 0.5 -> 0, 0.1 is best on cifar.\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinb_imagenet22k_384\":\n model = SwinTransformer(\n img_size=384,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=12,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinl_imagenet22k_224\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=192,\n depths=[2, 2, 18, 2],\n num_heads=[6, 12, 24, 48],\n window_size=7,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 192\n num_layers = 4\n\n feat_dim = int(embed_dim * 2 ** (num_layers - 1))\n # load checkpoint\n model_w = os.path.join(model_root, MODEL_ZOO[model_type])\n checkpoint = torch.load(model_w, map_location='cpu')\n state_dict = checkpoint['model']\n\n \"\"\"\n if crop_size == 448:\n for k in list(state_dict.keys()):\n if \"attn_mask\" not in k:\n # remove prefix\n state_dict[k] = state_dict[k]\n # delete renamed or unused k\n else:\n del state_dict[k]\n\n # rename some keys for ssl models\n if model_type.endswith(\"ssl\"):\n # rename moco pre-trained keys\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('encoder.'):\n # remove prefix\n state_dict[k[len(\"encoder.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n \"\"\"\n\n model.load_state_dict(state_dict, strict=False)\n # load cross attention\n for name, param in model.named_parameters():\n name_list = name.split('.')\n if name_list[0]=='layers':\n if name_list[2]=='cross_attn':\n if name_list[4]=='attention_norm':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.norm1.'+name_list[5]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n elif name_list[4]=='attn':\n if name_list[5]=='qkv':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.attn.qkv.'+name_list[6]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n elif name_list[5]=='to_out':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.attn.proj.'+name_list[6]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n\n return model, feat_dim" } ]
import argparse
import datetime
import numpy as np
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import json
import os
import utils
import random
import deepspeed
from pathlib import Path
from time import sleep
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner
from datasets import build_dataset
from datasets import build_beit_pretraining_dataset, build_beit_pretraining_dataset_val
from engine_for_train import train_one_epoch, evaluate  # engine for vit
from utils import NativeScalerWithGradNormCount as NativeScaler
from scipy import interpolate
from timm.models.layers import trunc_normal_
from functools import partial
from vpt_main.src.models.build_swin_backbone import _build_swin_model  # choose model
from deepspeed import DeepSpeedConfig
11737
utils.create_ds_config(args) print(args) device = torch.device(args.device) seed = 42 torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) if args.disable_eval_during_finetuning: dataset_val = None else: dataset_val, _ = build_dataset(is_train=False, args=args) print("Calculation of training examples = %d" % len(dataset_train)) print("Calculation of other examples = %d" % len(dataset_val)) if True: # args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: sampler_val = torch.utils.data.SequentialSampler(dataset_val) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.parameters()) print('------------------------------') for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None
# -------------------------------------------------------- # SA2VP: Spatially Aligned-and-Adapted Visual Prompt code # reference: # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Based on timm # https://github.com/rwightman/pytorch-image-models/tree/master/timm # --------------------------------------------------------' #os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' #os.environ['CUDA_VISIBLE_DEVICES']='0' class Dual_model(nn.Module): def __init__(self, args): super(Dual_model, self).__init__() self.vit_base, feat_dim = _build_swin_model('swinb_imagenet22k_224', 224, './backbone_ckpt') # where to save pre-trained model ./backbone_ckpt for k, p in self.vit_base.named_parameters(): name_list = k.split('.') print(name_list) if name_list[1] == 'deep_ppt' or name_list[1] == 'proj_ppt': p.requires_grad = True elif name_list[1] == '2': if name_list[2] == 'cross_attn': if name_list[4] == 'ffn' or name_list[4] == 'ffn_norm': p.requires_grad = True else: p.requires_grad = False else: p.requires_grad = False else: p.requires_grad = False self.class_head = nn.Linear(1024, args.nb_classes, bias=True) trunc_normal_(self.class_head.weight, std=0.02) def forward(self, x): x, p = self.vit_base.forward_features(x) # B*768 return self.class_head(x), self.class_head(p) def get_args(): parser = argparse.ArgumentParser('SA2VP script for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--epochs', default=30, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=50, type=int) parser.add_argument("--discrete_vae_weight_path", type=str) parser.add_argument("--discrete_vae_type", type=str, default="dall-e") # Model parameters parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--rel_pos_bias', action='store_true') parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias') parser.set_defaults(rel_pos_bias=False) parser.add_argument('--abs_pos_emb', action='store_true') parser.set_defaults(abs_pos_emb=True) parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help="0.1 for base, 1e-5 for large. 
set 0 to disable layer scale") parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--second_input_size', default=112, type=int, help='images input size for discrete vae') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0, help='Label smoothing (default: 0)') parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--second_interpolation', type=str, default='lanczos', help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")') # Evaluation parameters parser.add_argument('--crop_pct', type=float, default=None) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) parser.add_argument('--use_mean_pooling', action='store_true') parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--my_mode', default='train_val', type=str, help='my mode to train or test') parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation') parser.add_argument('--nb_classes', default=0, type=int, help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true') parser.add_argument('--data_set', default='CUB', choices=['CIFAR', 'IMNET', 'image_folder', 'CUB', 'DOG', 'FLOWER', 'CAR', 'BIRD', 'CAL101', 'DMLAB','EUROSAT','PATCH_CAMELYON','CLEVR_COUNT','CIFAR100','FOOD101','SVHN','DTD','FLOWER_S','PET','SVHN_S','SUN','Resisc45','Retinopathy','CLEVR_DISTANCE','KITTI_DISTANCE','DS_LOC','DS_ORI','SN_AZI','SN_ELE', 'DTD_DAM', 'GTSRB_DAM', 'FOOD_DAM', 'CIFAR10_DAM', 'CIFAR100_DAM', 'SVHN_DAM'], type=str, help='ImageNet dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') 
parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--enable_deepspeed', action='store_true', default=False) known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) print(args) device = torch.device(args.device) seed = 42 torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) if args.disable_eval_during_finetuning: dataset_val = None else: dataset_val, _ = build_dataset(is_train=False, args=args) print("Calculation of training examples = %d" % len(dataset_train)) print("Calculation of other examples = %d" % len(dataset_val)) if True: # args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: sampler_val = torch.utils.data.SequentialSampler(dataset_val) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.parameters()) print('------------------------------') for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None
optimizer_params = get_parameter_groups(
1
2023-12-12 13:19:17+00:00
16k
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
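The context entries above expose enough of the analysis API to reconstruct a small post-processing pass. Below is a minimal sketch of how SwitchCounter, get_packet_list, and LatencyMeasure could be combined to report per-packet retransmission latencies; the file paths, port numbers, and QP info are illustrative assumptions only and do not come from this record.

import logging

from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure

# Illustrative inputs; a real run would take these from the orchestrator's result directory.
SNAPSHOT = "results/0/switch/switch_state_snapshot.yaml"    # assumed path
PCAP = "results/0/pcap/aggregate.pcap"                      # assumed path
PORT_MAP = {"requester": 1, "responder": 2,                 # assumed port numbers
            "requester-mirror": 3, "responder-mirror": 4}
QP_INFO_LIST = [{"psn_rcv": 0, "psn_snd": 0, "qpn_rcv": 17, "qpn_snd": 18,
                 "ip_rcv": "192.168.1.2", "ip_snd": "192.168.1.1"}]  # assumed QP info


def report_retrans_latency():
    logging.basicConfig(level=logging.INFO)

    # Switch counters for the four ports involved in the experiment
    switch_counter = SwitchCounter(SNAPSHOT, PORT_MAP)
    logging.info("Switch counters: %s" % switch_counter.get_counter())

    # Sort the merged trace by the switch sequence number before measuring latencies
    packet_list = get_packet_list(PCAP)
    packet_list.sort(key=lambda x: x.get_switch_seqnum())

    measure = LatencyMeasure(packet_list, QP_INFO_LIST, is_read=False)
    for pkt in measure.get_undelivered_pkts():
        latency = measure.get_retransmit_latency(pkt)
        if latency is not None:
            logging.info("Retransmission latency: %.9f s" % latency)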
import argparse, os, math, glob, logging, time, sys
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
13,636
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3


def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
    config_file_handler(logger=root_logger,
                        log_file=os.path.join(orchestrator.result_path, LOG_FILENAME),
                        no_format=False)


def run_traffic(orchestrator):
    """ Run the traffic and collect the results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        bool: True if the experiment is successful, False otherwise
    """
    orchestrator.rm_old_files()

    if orchestrator.sync_and_compile() == False:
        logging.error("Failed to sync and compile the code")
        sys.exit(-1)

    logging.info("Sync and compile completed")

    if orchestrator.generate_switch_config_file() == False:
        logging.error("Failed to generate switch configuration file")
        sys.exit(-1)

    num_repeats = orchestrator.get_num_repeats()

    for i in range(num_repeats):
        logging.info("=" * 100)
        nb_retry = 0
        iter_result = False

        while nb_retry < MAX_NB_EXP_RETRIES:
            if orchestrator.run_experiment() == False:
                logging.error("Iteration %d: Failed to complete experiment" % i)
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                orchestrator.clean_up()
                time.sleep(5)
                continue

            logging.info("Iteration %d: Completed experiment" % i)

            try:
                orchestrator.clean_up()
                orchestrator.fetch_results(i)
                logging.info("Iteration %d: Fetch experiment results" % i)
                orchestrator.merge_traces(i)
                logging.info("Iteration %d: Merge the pcap files" % i)
            except:
                logging.error("Iteration %d: Result collection failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            if orchestrator.check_integrity(i) == False:
                logging.error("Iteration %d: Integrity check failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            iter_result = True
            break

        if iter_result is False:
            logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry))
            return False

    return True


def analyze_retrans_latency(pkt, latency_measurement, is_read, logger):
    """ Analyze the retransmission latency breakdown for an undelivered packet

    Args:
        pkt (Packet object): The undelivered packet
        latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown
        is_read (bool): If we use RDMA READ in this experiment
        logger (logging.Logger): A logger object

    Returns:
        N/A
    """
    # All the undelivered packets should be retransmitted in our test cases
    if latency_measurement.get_retransmit_pkt(pkt) == None:
        logger.error("\t\t No retransmit packet found for this packet")
        logger.error("\t\t It is possible that this undelivered packet is a redundant transmission")
        return

    retrans_latency = latency_measurement.get_retransmit_latency(pkt)
    if is_read == True:
        # For RDMA READ, we should always find a NACK READ request that triggers retransmission
        nack = latency_measurement.get_nack(pkt)
        if nack is not None:
            trigger = nack.get_trigger()
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3


def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
    config_file_handler(logger=root_logger,
                        log_file=os.path.join(orchestrator.result_path, LOG_FILENAME),
                        no_format=False)


def run_traffic(orchestrator):
    """ Run the traffic and collect the results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        bool: True if the experiment is successful, False otherwise
    """
    orchestrator.rm_old_files()

    if orchestrator.sync_and_compile() == False:
        logging.error("Failed to sync and compile the code")
        sys.exit(-1)

    logging.info("Sync and compile completed")

    if orchestrator.generate_switch_config_file() == False:
        logging.error("Failed to generate switch configuration file")
        sys.exit(-1)

    num_repeats = orchestrator.get_num_repeats()

    for i in range(num_repeats):
        logging.info("=" * 100)
        nb_retry = 0
        iter_result = False

        while nb_retry < MAX_NB_EXP_RETRIES:
            if orchestrator.run_experiment() == False:
                logging.error("Iteration %d: Failed to complete experiment" % i)
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                orchestrator.clean_up()
                time.sleep(5)
                continue

            logging.info("Iteration %d: Completed experiment" % i)

            try:
                orchestrator.clean_up()
                orchestrator.fetch_results(i)
                logging.info("Iteration %d: Fetch experiment results" % i)
                orchestrator.merge_traces(i)
                logging.info("Iteration %d: Merge the pcap files" % i)
            except:
                logging.error("Iteration %d: Result collection failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            if orchestrator.check_integrity(i) == False:
                logging.error("Iteration %d: Integrity check failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            iter_result = True
            break

        if iter_result is False:
            logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry))
            return False

    return True


def analyze_retrans_latency(pkt, latency_measurement, is_read, logger):
    """ Analyze the retransmission latency breakdown for an undelivered packet

    Args:
        pkt (Packet object): The undelivered packet
        latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown
        is_read (bool): If we use RDMA READ in this experiment
        logger (logging.Logger): A logger object

    Returns:
        N/A
    """
    # All the undelivered packets should be retransmitted in our test cases
    if latency_measurement.get_retransmit_pkt(pkt) == None:
        logger.error("\t\t No retransmit packet found for this packet")
        logger.error("\t\t It is possible that this undelivered packet is a redundant transmission")
        return

    retrans_latency = latency_measurement.get_retransmit_latency(pkt)
    if is_read == True:
        # For RDMA READ, we should always find a NACK READ request that triggers retransmission
        nack = latency_measurement.get_nack(pkt)
        if nack is not None:
            trigger = nack.get_trigger()
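The code above defines helpers but no entry point. A hypothetical driver that wires them together is sketched below; the --config-file flag and the Orchestrator constructor argument are assumptions for illustration only, since the record does not show how Orchestrator is actually instantiated.

# Hypothetical driver sketch -- only setup_root_logger/run_traffic come from the code above;
# the command-line flag and Orchestrator(config_file=...) signature are assumptions.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the go-back-N test")
    parser.add_argument("--config-file", required=True,
                        help="experiment configuration file (assumed flag)")
    args = parser.parse_args()

    orchestrator = Orchestrator(config_file=args.config_file)  # assumed constructor argument
    setup_root_logger(orchestrator)

    if run_traffic(orchestrator) == False:
        logging.error("Test failed")
        sys.exit(-1)

    logging.info("Test passed")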
if trigger == TRIGGER_OOS:
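The ground-truth continuation line above shows that analyze_retrans_latency next branches on the NACK trigger. The helper below is a hedged sketch of how such a branch could use TRIGGER_OOS and TRIGGER_TIMEOUT together with the LatencyMeasure methods defined in the context; it is not the repository's actual continuation.

def log_retrans_breakdown(pkt, trigger, retrans_latency, latency_measurement, logger):
    # Hedged sketch: one plausible way to handle the trigger branch; not the repository's code.
    if trigger == TRIGGER_OOS:
        # Out-of-sequence NACK: also report NACK generation and NACK response latency
        nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
        nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
        logger.info("\t\t Retransmit latency: %.9f s, NACK gen: %s, NACK resp: %s"
                    % (retrans_latency, nack_gen_latency, nack_resp_latency))
    elif trigger == TRIGGER_TIMEOUT:
        # Timeout-triggered retransmission: only the end-to-end latency is available
        logger.info("\t\t Retransmit latency (timeout): %.9f s" % retrans_latency)
    else:
        logger.error("\t\t Unknown retransmission trigger: %s" % trigger)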
9
2023-12-09 08:21:14+00:00
16k
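A row of this dump appears to pair repository context with a single ground-truth continuation line. The sketch below shows one way such a record might be consumed for next-line prediction; the field names follow this dump, while the prompt layout and the exact-match check are assumptions rather than the dataset's official evaluation.

from typing import Dict, List


def build_prompt(record: Dict) -> str:
    """Concatenate the selected context snippet, the imports, and the cropped code (assumed layout)."""
    context: List[Dict] = record["context"]
    gold_snippet = context[record["gold_snippet_index"]]["snippet"]
    return "\n\n".join([gold_snippet, record["import_statement"], record["cropped_code"]])


def is_exact_match(prediction: str, record: Dict) -> bool:
    """Compare a model's first generated line against the ground-truth next line."""
    return prediction.strip() == record["next_line"].strip()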
boweniac/autogan
autogan/agents/tool_agent_search.py
[ { "identifier": "CodeExecution", "path": "autogan/tools/code_execution_tool.py", "snippet": "class CodeExecution:\n def __init__(self, work_dir: Optional[str] = None):\n \"\"\"A class for code execution\n 用于代码执行的类\n\n Supports python, bash, shell, powershell code\n 支持 python, bash, shell, powershell 代码\n\n Please note when using:\n 使用时请注意:\n\n 1.Code must be encapsulated with ``` symbol\n 1.代码必须使用 ``` 符号封装\n\n 2.Must be run in a docker environment\n 2.须在 docker 环境中运行\n\n :param work_dir: The relative path for code execution, default is extensions\n 执行代码的相对路径,默认为 extensions\n \"\"\"\n if work_dir is None:\n work_dir = \"extensions\"\n self._work_dir = os.getcwd() + \"/\" + work_dir\n self._win32 = sys.platform == \"win32\"\n self._path_separator = self._win32 and \"\\\\\" or \"/\"\n\n def code_execution_reply(self, text: str) -> Tuple[str, int]:\n \"\"\"Execute code and return result\n 执行代码并返回结果\n\n :param text: Code must be encapsulated with ``` symbol\n 代码必须使用 ``` 符号封装\n\n :return:\n --execution_result: Execution result\n 执行结果\n --tokens: Tokens of the execution result\n 执行结果的 tokens\n \"\"\"\n\n # Determine whether it is running in docker\n if os.path.exists(\"/.dockerenv\"):\n lang, code = self.extract_code(text)\n if code is None:\n exitcode = 1\n output = \"Submit your Python code to me and I can tell you the execution result. But I can't write code or talk to you. So please just submit the completed code to me encapsulated with ``` symbols. And you should always use the 'print' function for the output\"\n else:\n exitcode, output = self.execute(code, lang=lang)\n else:\n exitcode = 1\n output = \"executing code needs to run in a docker environment\"\n\n if not output:\n exitcode = 1\n output = \"You should always use the 'print' function for the output\"\n\n result = \"execution succeeded\" if exitcode == 0 else \"execution failed\"\n if exitcode != 0:\n output += \"\\nIf you need to install dependencies, you can send me the code for installing dependencies. 
Like ```pip install openai```\"\n execution_result = f\"exitcode: {exitcode} ({result})\\n{output}\"\n else:\n execution_result = f\"exitcode: {exitcode} ({result})\\nCode output: \\n{output}\"\n tokens = count_text_tokens(execution_result)\n\n return execution_result, tokens\n\n def execute(\n self,\n code: str,\n lang: Optional[str] = None,\n timeout: Optional[int] = 600,\n ) -> Tuple[int, str]:\n \"\"\"Execute code\n 执行代码\n\n :param code: Code to be executed\n :param lang: Code language, if empty, will try to infer the language from the code\n :param timeout: Maximum code execution time (seconds)\n\n :return:\n --exitcode: exitcode\n --output: Execution result\n \"\"\"\n try:\n if not lang:\n lang = self.infer_lang(code)\n\n if lang not in [\"bash\", \"shell\", \"sh\", \"python\", \"Python\"]:\n return 1, \"unknown language\"\n\n print(\n colored(\n f\"\\n\\n>>>>>>>> EXECUTING CODE BLOCK (language is {lang})...\",\n \"red\",\n ),\n flush=True,\n )\n\n if self._win32 and lang in [\"sh\", \"shell\"]:\n lang = \"ps1\"\n\n # Create a temporary file\n code_hash = md5(code.encode()).hexdigest()\n filename = f\"tmp_code_{code_hash}.{'py' if lang.startswith('python') else lang}\"\n filepath = os.path.join(self._work_dir, filename)\n file_dir = os.path.dirname(filepath)\n os.makedirs(file_dir, exist_ok=True)\n\n # Write the code into a temporary file\n with open(filepath, \"w\", encoding=\"utf-8\") as tmp_code:\n tmp_code.write(code)\n\n # Execute code\n cmd = [\n sys.executable if lang.startswith(\"python\") or lang.startswith(\"Python\") else self._cmd(lang),\n f\".\\\\{filename}\" if self._win32 else filename,\n ]\n if self._win32:\n result = subprocess.run(\n cmd,\n cwd=self._work_dir,\n capture_output=True,\n text=True,\n )\n else:\n signal.signal(signal.SIGALRM, self._timeout_handler)\n try:\n signal.alarm(timeout)\n # run the code in a subprocess in the current docker container in the working directory\n result = subprocess.run(\n cmd,\n cwd=self._work_dir,\n capture_output=True,\n text=True,\n )\n signal.alarm(0)\n except TimeoutError:\n os.remove(filepath)\n return 1, \"Timeout\"\n\n os.remove(filepath)\n if result.returncode:\n logs = result.stderr\n abs_path = str(pathlib.Path(filepath).absolute())\n logs = logs.replace(str(abs_path), \"\").replace(filename, \"\")\n else:\n logs = result.stdout\n\n return result.returncode, logs\n except Exception as e:\n return 1, f\"execution error: {e}\"\n\n @staticmethod\n def extract_code(text: str) -> Tuple[Optional[str], Optional[str]]:\n \"\"\"Extract code from text\n\n :param text: 包含代码的文本,代码必须以```符号封装\n\n :return:\n --lang: Code must be encapsulated with ``` symbol\n --code: Code to be executed\n \"\"\"\n match = re.findall(r\"```(\\w*)\\n(.*?)\\n```\", text, flags=re.DOTALL)\n return match[0] if match else (None, None)\n\n @staticmethod\n def infer_lang(code) -> str:\n \"\"\"Infer code language\n\n :param code: Code to be executed\n\n :return: The inferred code language, if the inference fails, it will return unknown\n \"\"\"\n if (code.startswith(\"python \") or code.startswith(\"pip\") or code.startswith(\"python3 \")\n or code.startswith(\"pip3\")):\n return \"sh\"\n\n try:\n compile(code, \"test\", \"exec\")\n return \"python\"\n except SyntaxError:\n return \"unknown\"\n\n @staticmethod\n def _timeout_handler(signum, frame):\n raise TimeoutError(\"Timed out!\")\n\n @staticmethod\n def _cmd(lang):\n if lang.startswith(\"python\") or lang in [\"bash\", \"sh\", \"powershell\"]:\n return lang\n if lang in [\"shell\"]:\n return \"sh\"\n if 
lang in [\"ps1\"]:\n return \"powershell\"\n raise NotImplementedError(f\"{lang} not recognized in code execution\")" }, { "identifier": "WolframAlphaAPIWrapper", "path": "autogan/tools/wolfram_alpha_tool.py", "snippet": "class WolframAlphaAPIWrapper:\n def __init__(self, wolfram_config: Dict):\n \"\"\"Wrapper for Wolfram Alpha.\n\n :param wolfram_config: JSON format of email_config\n {\"app_id\": \"\"}\n \"\"\"\n self._wolfram_client = wolframalpha.Client(wolfram_config['app_id'])\n\n def run(self, query: str) -> Optional[str]:\n from urllib.error import HTTPError\n\n res = None\n for _ in range(20):\n try:\n res = self._wolfram_client.query(query)\n break\n except HTTPError:\n sleep(1)\n except Exception:\n return None\n if res is None:\n return None\n\n try:\n if not res[\"@success\"]:\n return None\n assumption = next(res.pods).text\n answer = \"\"\n for result in res[\"pod\"]:\n if result[\"@title\"] == \"Solution\":\n answer = result[\"subpod\"][\"plaintext\"]\n if result[\"@title\"] == \"Results\" or result[\"@title\"] == \"Solutions\":\n for i, sub in enumerate(result[\"subpod\"]):\n answer += f\"ans {i}: \" + sub[\"plaintext\"] + \"\\n\"\n break\n if answer == \"\":\n answer = next(res.results).text\n\n except Exception:\n return None\n\n if answer is None or answer == \"\":\n return None\n\n return f\"Assumption: {assumption} \\nAnswer: {answer}\"" }, { "identifier": "count_text_tokens", "path": "autogan/oai/count_tokens_utils.py", "snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens" }, { "identifier": "UniversalAgent", "path": "autogan/agents/universal_agent.py", "snippet": "class UniversalAgent:\n def __init__(\n self,\n name: str,\n agent_config: Optional[Dict] = None,\n duty: Optional[str] = None,\n work_flow: Optional[str] = None,\n use_tool: Optional[str] = None, # only | join\n super_rich: Optional[str] = None, # auto | on | off\n stream_mode: Optional[bool] = None,\n ):\n \"\"\"Agent base class\n\n Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.\n 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。\n\n To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.\n 想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。\n\n :param name: The agent name should be unique in the organizational structure.\n agent name 在组织架构中应当是唯一的。\n :param agent_config: The agent configuration includes:\n agent 配置包括:\n - main_model: The LLM configuration of the agent's main body.\n agent 主体的 LLM 配置。\n - summary_model: The LLM configuration used for compressing context and generating text summaries.\n 用于压缩上下文以及生成文本摘要的 LLM 配置。\n - request_interval_time: The interval time of LLM requests.\n LLM 请求间隔时间。\n - request_timeout:The timeout of LLM requests.\n LLM 请求超时时间。\n - max_retries: The maximum number of retries for LLM 
requests.\n LLM 请求最大重试次数。\n :param duty: Used to explain one's job responsibilities to other agents.\n 用于向其他 agent 说明自己的工作职责。\n :param work_flow: Defines the workflow of the agent.\n 定义 agent 的工作流程。\n :param use_tool: Defines the mode of the agent using the tool_function:\n 定义 agent 使用 tool_function 的模式:\n - None: means not using the tool function.\n 不使用工具函数。\n - only: Do not use the LLM, only use the tool function to generate results.\n 不使用 LLM,仅使用工具函数生成结果。\n - join: The content generated by the LLM will be used as the input parameter for the tool_function.\n LLM 生成的内容将作为 tool_function 的输入参数\n :param super_rich: Whether to enable the deep thought function. When enabled,\n it uses a set of analysis processes to refine the output of the agent. However,\n this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.\n The name \"super_rich\" is a reminder that using this function with gpt-4 can be expensive,\n even more so than Elon Musk's earning speed.\n 是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。\n 之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。\n - auto: Disable for GPT-4, enable for other models\n 在 gpt-4下禁用,其他模型开启\n - on: Always enabled\n 始终开启\n - off: Always disabled\n 始终关闭\n :param stream_mode: Whether to enable the stream_mode\n 定义 agent 的工作流程。\n \"\"\"\n self.name = name\n self.agent_config = AgentConfig(agent_config) if agent_config else None\n self.duty = duty\n self.super_rich = super_rich # auto | on | off\n self.stream_mode = stream_mode\n self.response_func = default_response_func # Used to return results to the interface or terminal.\n self.workmates = \"\" # relevant personnel's name and duty\n self.pipeline = \"\" # In a linear workflow, this is the next person to communicate with.\n # Translate the session ID of the pusher into the sub-session ID of the receiver.\n self.sub_to_main_task_id = defaultdict(str)\n # Translate the session id of the sender into the superior session id of the receiver.\n self.main_to_sub_task_id = defaultdict(str)\n self._work_flow = work_flow\n self._use_tool = use_tool # only | join\n self._conversation_messages = defaultdict(list) # key: task id,value: Conversation history\n self._conversation_focus = defaultdict(Dict) # key: task id,value: {\"task_issuer\": \"\", \"task_content\": \"\"}\n\n def set_agent_config(self, agent_config: Dict):\n self.agent_config = AgentConfig(agent_config)\n\n def new_task(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,\n completion_tokens: int):\n \"\"\"Accept tasks posted by other agent.\n\n :param switch: AgentSwitch object\n :param task_id: New task id\n :param sender_name: Task Issuer's Name\n :param content: Task content\n :param completion_tokens: Task content tokens\n \"\"\"\n # Avoid excessively long task content\n if (self._use_tool != \"only\" and completion_tokens >\n self.agent_config.main_model_config.max_messages_tokens * 0.5):\n self._push_to_switch(switch, task_id, \"The task is too long\", 5)\n\n # Cache task information to maintain focus during task execution\n task_content = content.replace(f\"@{self.name}\", \"please help me\")\n task_content = task_content.replace(f\"{switch.task_tag}\", \"\")\n self._conversation_focus[task_id] = {'task_issuer': sender_name, 'task_content': task_content}\n # Start the generation process\n self._generate_process(switch, task_id, sender_name, content, completion_tokens)\n\n def receive(self, switch: AgentSwitch, task_id: str, sender_name: str, content: 
str,\n completion_tokens: int):\n \"\"\"Receive messages sent by other agents (excluding new task requests)\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n :param sender_name: Name of the agent sending the message\n :param content: Message content\n :param completion_tokens: Message content tokens\n \"\"\"\n if self._use_tool != \"only\":\n safe_size = self.agent_config.main_model_config.max_messages_tokens\n if completion_tokens > safe_size:\n # 如消息内容过长,则对其进行压缩\n compressed_text, total_tokens = compressed_text_universal(\n content, self.agent_config.summary_model_config,\n self.name, self.response_func, self.stream_mode,\n self._conversation_focus[task_id]['task_content'], safe_size)\n if compressed_text:\n content = compressed_text\n completion_tokens = total_tokens\n\n # Press the message into the session record of the current task\n self._conversation_messages[task_id].append(\n {'role': 'user', 'content': content, 'tokens': completion_tokens})\n\n # Start the generation process\n self._generate_process(switch, task_id, sender_name, content, completion_tokens)\n\n def tool_function(self, task_id: str, param: Optional[str] = None,\n tokens: Optional[int] = None) -> tuple[str, int]:\n \"\"\"When the value of the use_tool parameter is 'only' or 'join', please override this method.\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n pass\n\n def _base_message(self, switch: AgentSwitch, task_id: str) \\\n -> tuple[dict[str, str], Optional[dict[str, Any]], int]:\n \"\"\"This is the paradigm message required for each round of dialogue.\n 每轮对话都需要的范式消息\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n\n :return:\n -- system_message: Used to clarify its own workflow to the agent and where the agent can seek help.\n 用于向 agent 阐明自身工作流程,以及可以向哪些 agent 寻求帮助。\n -- focus_message: Used to maintain focus during task execution, including who is currently executing the task and what the content of the task is. It will not be forgotten or compressed with the increase of dialogue rounds.\n 用于在任务执行过程中保持专注力,包括当前正在执行谁发布的任务、任务的内容是什么。不会随会话轮次的增多而被遗忘或压缩。\n -- total_tokens: The overall tokens of the content of the system_message and the focus_message.\n system_message 以及 focus_message 内容的整体 tokens。\n \"\"\"\n total_tokens = 0\n\n info = environment_info()\n\n # Assemble system message\n system_prompt = f\"\"\"Now your name is {self.name}, you are an assistant who will not give up easily when you encounter difficulties\n\nEnvironment information:\n{info}\"\"\"\n\n if self._work_flow:\n system_prompt += f\"\"\"\n\nYour work flow is::\n{self._work_flow}\"\"\"\n\n if self.workmates:\n system_prompt += f\"\"\"\n\nThe following professionals can help you accomplish the task:\n{self.workmates}\"\"\"\n\n if self._use_tool is None:\n system_prompt += f\"\"\"\n \n Please follow these guidelines when replying to any content:\n 1. Be aware that if you do not @recipient at the beginning, the system will give an error.\n 2. When asking for help, you need to first post a task, the method is: @recipient {switch.task_tag} task content.\n 3. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others.\n 4. Do not suggest the recipient to communicate with others.\n 5. Do not explain to the initiator of the task what you are going to do.\n 6. 
In the reply, do not converse with two recipients at the same time.\n \"\"\"\n\n total_tokens += 37\n\n system_message = {'role': 'system', 'content': system_prompt}\n if task_id in self._conversation_focus and self._conversation_focus[task_id]:\n # Assemble focus message\n focus_prompt = f\"\"\"current task content:\ntask issuer: {self._conversation_focus[task_id]['task_issuer']}\ntask content: {self._conversation_focus[task_id]['task_content']}\"\"\"\n\n if self._use_tool is None:\n if self.pipeline and self.pipeline != \"\\\\\":\n focus_prompt += f\"\"\"\n\nWhen you have the result of the task, please @{self.pipeline} {switch.task_tag} and reply to the execution result, He'll know what to do next\"\"\"\n else:\n focus_prompt += f\"\"\"\n\nWhen you have the result of the task, please @{self._conversation_focus[task_id]['task_issuer']} and reply to the execution result\"\"\"\n\n total_tokens += count_text_tokens(focus_prompt)\n\n focus_message = {'role': 'user', 'content': focus_prompt}\n else:\n focus_message = None\n\n return system_message, focus_message, total_tokens\n\n def _super_rich_message(self, switch: AgentSwitch, task_id: str, ideas: dict, index: int)\\\n -> tuple[list[str, dict], bool]:\n \"\"\"Thought prompts, with new content requested at each level\n 深思提示词,每层请求新的内容\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n :param ideas: Results generated\n :param index: Current thinking depth\n\n :return:\n -- message_list: Thought prompts list\n -- tag:\n -- message: Thought prompts\n -- is_end:\n \"\"\"\n messages = []\n\n task_issuer = \"\"\n if self.pipeline and self.pipeline != \"\\\\\":\n task_issuer += f\"{self.pipeline} : When there is no more work to be done, Submit the results to me.\"\n else:\n task_issuer += f\"{self._conversation_focus[task_id]['task_issuer']} : When there is no more work to be done, Submit the results to me.\"\n\n total_tokens = 0\n\n info = f\"\"\"\n\nreference workflow:\n{environment_info()}\"\"\"\n\n workmates = \"\"\n if self.workmates:\n workmates = f\"\"\"\n\nrelevant personnel's name and duty:\n{self.workmates}\n{task_issuer}\"\"\"\n\n workflow = \"\"\n if self._work_flow:\n workflow = f\"\"\"\n{self._work_flow}\"\"\"\n\n repetitive_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, please do the following analysis:\n\nStep 1: Understand your overall workflow (No need to output):\n workflow:{workflow}\n\nStep 2: Analyze whether {self.name} is repeating a task in the workflow or encountering difficulties (No need to output).\n\nStep 3: output your analysis results\n If yes, please give advice on how to stop repeating from the perspective of {self.name}.\n If not, please reply one word 'None'.\"\"\"\n\n messages.append([\"Observe whether the previous conversation fell into a cycle\", {'role': 'system', 'content': repetitive_prompt}])\n\n debug_prompt = f\"\"\"The above is a group chat record, please do the following analysis:\n\nStep 1: Understand your overall workflow, Including the execution conditions and objectives for each step (No need to output):\n workflow:{workflow}\n \nStep 2: Analyze whether there are unresolved errors in the previous conversation (No need to output).\n\nStep 3: Analyze If there are unresolved errors, Think about what the root cause of these errors is (No need to output).\n\nStep 4: Analyze If there are unresolved errors, From {self.name}'s perspective, how should you solve it next? 
(No need to output)\n\nStep 5: output your analysis results, including the following content:\n whether there are unresolved errors in the previous conversation:\n If there are unresolved errors, What errors in the dialogue:\n If there are unresolved errors, The root cause of the error:\n If there are unresolved errors, How to solve it next:\n\nNote: There's no need to output the specific dialogue content, just output the analysis results.\"\"\"\n\n messages.append([\"Reflect on whether there are any errors in the previous dialogue process\", {'role': 'system', 'content': debug_prompt}])\n\n planning_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, please do the following analysis:\n\nStep 1: Understand your overall workflow (No need to output):\n workflow:{workflow}\n\nStep 2: Analyze which item to execute or continue to execute in the workflow (No need to output).\n\nStep 3: Understand the specific errors that have occurred in the current conversation (No need to output).\n Are you stuck in a deadlock: {ideas[\"Observe whether the previous conversation fell into a cycle\"]}\n \n {ideas[\"Reflect on whether there are any errors in the previous dialogue process\"]}\n\nStep 4: Understand some rules (No need to output).\n 1. When asking for help, you need to first post a task,\n 2. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others.\n 2. Don't let the other party to communicate with others.\n 3. In your plan, there should be no content about apologizing to others or what you are going to do.\n\nStep 5: output your analysis results, including the following content:\n Do you need to create a task:\n In the next round of conversation, the specific work you need to do is(Please explain in detail and Ignore the work that has been completed.):\n all the details that need to be taken into consideration, including recommended methods or tools, etc:\n\nNote: There's no need to output the specific dialogue content, just output the analysis results.\n\"\"\"\n\n messages.append([\"Think about what to do next\", {'role': 'system', 'content': planning_prompt}])\n\n communicate_prompt = f\"\"\"your name is {self.name}, please do the following analysis:\n \nStep 1: Understand your work plan (No need to output):\n {ideas[\"Think about what to do next\"]}\n\nStep 2: Get to know your colleagues, including what they can and cannot do (No need to output):\n {workmates}\n {self._conversation_focus[task_id]['task_issuer']} : \"\"\n \nStep 3: Analyze who is the most relevant colleague to the first step of next round of conversation the specific work you need to do, note that you can only choose one person (No need to output).\n\nStep 4: output your analysis results, including the following content:\n who is the most relevant colleague to the first step of your plan:\n What are the requirements when the other party receives messages:\n What can the other party do:\n What the other party cannot do:\n \nNote: please provide the correct names of relevant personnel, Don't provide names that don't exist.\"\"\"\n\n messages.append([\"Think about who to communicate with next\", {'role': 'user', 'content': communicate_prompt}])\n\n reply_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, Please strictly follow the contents of the guidelines below to generate your response, note do not communicate with others or perform other tasks:\n\n{info}\n\nStep 1: Clarify who you will be communicating with 
(No need to output):\n {ideas[\"Think about who to communicate with next\"]}\n\nStep 2: Specify the task you are going to carry out (No need to output):\n {ideas[\"Think about what to do next\"]}\n\nStep 3: Understand some response rules (No need to output).\n 1. Please do not mention the second person in your reply content.\n 2. When you need to post a task, the method is: @recipient {switch.task_tag} task content.\n\nStep 4: Please follow the content of the previous step, From {self.name}'s perspective, Output your response in the format below:\n @who you will be communicating with + Reply content\"\"\"\n\n messages.append([\"Generate reply content\", {'role': 'system', 'content': reply_prompt}])\n\n if index == len(messages) - 1:\n return messages[index], True\n else:\n return messages[index], False\n\n def _generate_process(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,\n completion_tokens: int):\n \"\"\"Generate process\n\n If the value of the use_tool parameter is None, only the main LLM is used to generate a response.\n 如果 use_tool 参数的值为 None,则仅使用主体 LLM 生成回复。\n\n If the value of the use_tool parameter is 'only', the main LLM is skipped and the tool_function is used directly to generate a response.\n 如果 use_tool 参数的值为 only,则跳过主体 LLM 直接使用 tool_function 生成回复。\n\n If the value of the use_tool parameter is 'join', the main LLM is first used to generate content, and then the generated content is used as the input parameter for tool_function.\n 如果 use_tool 参数的值为 join,则先使用主体 LLM 生成内容,然后将生成的内容作为 tool_function 的输入参数。\n \"\"\"\n hold_content = content\n hold_completion_tokens = completion_tokens\n try:\n if self._use_tool != \"only\":\n if self._use_tool == \"join\":\n print(\n colored(\n f\"\\n\\n>>>>>>>> tool call:\",\n \"cyan\",\n ),\n flush=True,\n )\n content, completion_tokens = self._base_generate_reply(switch, task_id, \"tool_call\")\n else:\n if self.super_rich == \"on\":\n content, completion_tokens = self._super_rich_generate_reply(switch, task_id)\n elif (self.super_rich == \"auto\" or self.super_rich is None) and \"gpt-4\" not in self.agent_config.main_model_config.model:\n content, completion_tokens = self._super_rich_generate_reply(switch, task_id)\n else:\n content, completion_tokens = self._base_generate_reply(switch, task_id, \"main\")\n if content is None:\n raise ValueError(\"Failed to generate content.\")\n else:\n content = re.sub(r'^@\\S+\\s+', '', content).strip()\n\n if self._use_tool and not content.startswith(\"@\"):\n content, completion_tokens = self.tool_function(task_id, content, completion_tokens)\n # Assign recipients for the results generated by the tool_function.\n if not content.startswith(\"@\"):\n if (task_id in self._conversation_focus and \"task_issuer\" in\n self._conversation_focus[task_id]):\n receiver = self._conversation_focus[task_id]['task_issuer']\n else:\n receiver = sender_name\n content = f\"@{receiver} \" + content\n self.response_func(self.name, \"tool\", \"\", False, 0, content, completion_tokens, None)\n self._push_to_switch(switch, task_id, content, completion_tokens)\n except SystemExit:\n print(\"The task is finished.\")\n except Exception as e:\n print(f\"e :{e}\")\n if self._use_tool == \"only\":\n self._push_to_switch(switch, task_id, f\"@{sender_name} Generate error, Trying again\", 4)\n else:\n self._re_push_to_switch(switch, task_id, hold_content, hold_completion_tokens,\n sender_name)\n\n def _base_generate_reply(self, switch: AgentSwitch, task_id: str, gen: str) -> tuple[Optional[str], Optional[int]]:\n 
\"\"\"Use the main LLM to generate responses.\n\n Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first.\n\n :param switch: AgentSwitch Object\n :param task_id: Task id\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n system_message, focus_message, total_tokens = self._base_message(switch, task_id)\n\n # Calculate the target size of context compression.\n safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens\n # Compress the historical conversation records.\n request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size)\n request_messages.insert(0, system_message)\n if focus_message:\n request_messages.insert(0, focus_message)\n return generate_chat_completion(self.agent_config.main_model_config, request_messages, self.name, gen, self.response_func, self.stream_mode)\n\n def _super_rich_generate_reply(self, switch: AgentSwitch, task_id: str) -> tuple[Optional[str], Optional[int]]:\n \"\"\"Use the main LLM to generate responses.\n\n Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first.\n\n :param switch: AgentSwitch Object\n :param task_id: Task id\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n system_message, focus_message, total_tokens = self._base_message(switch, task_id)\n\n # Calculate the target size of context compression.\n safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens\n\n # Compress the historical conversation records.\n request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size)\n\n if focus_message:\n request_messages.insert(0, focus_message)\n\n index = 0\n ideas = defaultdict(str)\n while True:\n message, is_end = self._super_rich_message(switch, task_id, ideas, index)\n if is_end:\n gen = \"main\"\n else:\n gen = \"idea\"\n\n print(\n colored(\n f\"\\n\\n>>>>>>>> {message[0]}:\",\n \"cyan\",\n ),\n flush=True,\n )\n\n if message[1][\"role\"] == \"system\":\n messages = request_messages.copy()\n messages.append(message[1])\n content, token = generate_chat_completion(self.agent_config.main_model_config, messages, self.name, gen, self.response_func, self.stream_mode)\n ideas[message[0]] = content\n tokens = token\n else:\n content, token = generate_chat_completion(self.agent_config.main_model_config, [message[1]], self.name, gen, self.response_func, self.stream_mode)\n ideas[message[0]] = content\n tokens = token\n if is_end:\n break\n else:\n index += 1\n\n return content, tokens\n\n def _push_to_switch(self, switch: AgentSwitch, task_id: str, content: str, completion_tokens: int):\n content = content.replace(f\"@{self.name} \", \"\")\n self._conversation_messages[task_id].append(\n {'role': 'assistant', 'content': content, 'tokens': completion_tokens})\n\n switch.handle_and_forward(task_id, self.name, content, completion_tokens)\n\n def _chat_messages_safe_size(self, task_id: str, safe_size: int) \\\n -> tuple[list, int]:\n \"\"\"Compress the historical session records within the current task scope (excluding system_message and focus_message)\n\n :param task_id: Task id\n :param safe_size: The max_messages_tokens of the main LLM configuration\n\n :return: --request_messages: It is used for the message content requested to LLM, with the tokens field of each message 
removed.\n –-total_tokens: The overall tokens after compression.\n \"\"\"\n if task_id in self._conversation_messages and self._conversation_messages[task_id]:\n conversation_messages, request_messages, total_tokens = compressed_messages(\n self._conversation_messages[task_id], self._conversation_focus[task_id]['task_content'],\n self.agent_config.summary_model_config, self.name, self.response_func, self.stream_mode,\n safe_size)\n\n if request_messages:\n self._conversation_messages[task_id] = conversation_messages\n return request_messages, total_tokens\n\n return [], 0\n\n @staticmethod\n def _re_push_to_switch(switch: AgentSwitch, task_id: str, content: str, completion_tokens: int, sender: str):\n switch.handle_and_forward(task_id, sender, content, completion_tokens)" }, { "identifier": "compressed_text_universal", "path": "autogan/utils/compressed_text_utils.py", "snippet": "def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n focus: Optional[str] = None, safe_size: Optional[int] = None) \\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Compress the text, generating either a regular summary or a cue summary.\n 压缩文本,可生成普通摘要或线索摘要。\n\n First, the long text is sliced, and then a summary is generated for each slice.\n 首先将长文本切片,然后逐切片的生成摘要。\n\n If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.\n 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。\n\n If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.\n 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。\n\n :param text: Text to be compressed.\n 待压缩的文本。\n :param summary_model_config: LLM configuration used for text compression.\n 用于压缩文本的 LLM 配置。\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param focus: The focus direction when compressing text.\n 压缩文本时的专注方向。\n :param safe_size: The target size of the text after compression, if not provided there is no limit.\n 文本压缩后的目标尺寸,如果为空则不做限制。\n\n :return:\n --compressed_text: The text after compression.\n 压缩后的文本。\n --total_tokens: Total tokens after compression.\n 压缩后的整体tokens。\n \"\"\"\n\n compressed_text = \"\"\n total_tokens = 0\n\n split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)\n\n for st in split_texts:\n if focus:\n content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,\n stream_mode)\n else:\n content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)\n\n if content:\n compressed_text += content + \"\\n\"\n total_tokens += tokens\n\n if compressed_text:\n if safe_size and safe_size < total_tokens:\n return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,\n response_func, stream_mode)\n else:\n return compressed_text, total_tokens\n else:\n return None, None" }, { "identifier": "WebSearch", "path": "autogan/tools/web_search_tool.py", "snippet": "class WebSearch:\n def __init__(self, google_search_config: Dict):\n \"\"\"A class for google search\n\n :param search_config: JSON format of email_config {\"cx\": \"\", \"key\": 
\"\"}\n \"\"\"\n self._cx = google_search_config[\"cx\"]\n self._key = google_search_config[\"key\"]\n\n def get_search_detail(self, keyword: str, start: int, agent_name: str, gen: str, response_func: ResponseFuncType)\\\n -> Optional[str]:\n \"\"\"Obtain the main text content of a search result page\n\n :param keyword: Search keywords\n :param start: Search result index offset\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n\n :return: The main content of the page\n \"\"\"\n\n result = self.google_search(keyword, start, 1)\n\n if result is None:\n return None\n\n url = result[0][\"link\"]\n\n response_func(agent_name, gen, \"\", False, 0, url, 0, None)\n\n # Obtain the main content of the URL page\n response = requests.get(url)\n response.encoding = response.apparent_encoding\n soup = BeautifulSoup(response.text, 'html.parser')\n main_text = soup.get_text()\n\n # Remove extra line breaks\n s = re.sub('\\n+', '\\n', main_text)\n\n if s:\n return s\n else:\n return None\n\n def google_search(self, keyword: str, start: int, num: int) -> Optional[list]:\n \"\"\"Call Google web search interface\n\n :param keyword: Search keywords\n :param start: Search result index offset\n :param num: Get the number of results\n\n :return:\n --result_list: Search results list\n --is_success: Successful or not\n \"\"\"\n\n # 接口参数\n url = \"https://www.googleapis.com/customsearch/v1\"\n\n params = {\n 'q': quote(keyword),\n 'start': start,\n 'num': num,\n 'cx': self._cx,\n 'key': self._key,\n }\n\n loop = 3\n for i in range(loop):\n try:\n response = requests.get(url, params=params)\n response.raise_for_status() # If the response status is not 200, throw an exception\n data = response.json() # Parse the returned json data\n\n if 'items' not in data:\n raise ValueError(\"The return value is empty.\")\n\n # Extract the title, link, and snippet fields from each object in the items field.\n results = []\n for item in data['items']:\n result = {\n 'title': item.get('title', ''),\n 'link': item.get('link', ''),\n 'snippet': item.get('snippet', '')\n }\n results.append(result)\n\n return results\n except requests.HTTPError as http_err:\n time.sleep(5)\n if i == loop - 1:\n print(f'HTTP error occurred: {http_err}')\n return None\n except Exception as e:\n time.sleep(5)\n if i == loop - 1:\n return None" } ]
import re from collections import defaultdict from typing import Optional, Dict from autogan.tools.code_execution_tool import CodeExecution from autogan.tools.wolfram_alpha_tool import WolframAlphaAPIWrapper from autogan.oai.count_tokens_utils import count_text_tokens from autogan.agents.universal_agent import UniversalAgent from autogan.utils.compressed_text_utils import compressed_text_universal from autogan.tools.web_search_tool import WebSearch
11604
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None
self._wolfram_alpha = WolframAlphaAPIWrapper(
1
2023-12-06 03:24:34+00:00
16k
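The work_flow for ToolAgentSearch above tells the agent to wrap tool requests in ```web or ```wolfram fences and not to @ anyone when doing so. The repository's actual tool_function is not included in this record, so the snippet below is only a minimal sketch, under the assumption that the fenced convention is followed literally, of how such a block could be pulled out of a reply string with the standard re module; the helper name extract_tool_block and its return shape are illustrative, not part of the autogan API.

```python
import re
from typing import Optional, Tuple

# Hypothetical helper (not part of the autogan repo): find the first
# ```web ...``` or ```wolfram ...``` block in an agent reply.
TOOL_BLOCK_RE = re.compile(r"```(web|wolfram)\s*\n(.*?)```", re.DOTALL)


def extract_tool_block(reply: str) -> Optional[Tuple[str, str]]:
    """Return (tool_name, payload) for the first fenced tool block, or None."""
    match = TOOL_BLOCK_RE.search(reply)
    if match is None:
        return None
    return match.group(1), match.group(2).strip()


# Example: a reply following the fenced convention described in work_flow.
reply = "```wolfram\nintegrate x^2 from 0 to 3\n```"
print(extract_tool_block(reply))  # ('wolfram', 'integrate x^2 from 0 to 3')
```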
ebb-earl-co/tidal-wave
tidal_wave/playlist.py
[ { "identifier": "AudioFormat", "path": "tidal_wave/media.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identifier": "PlaylistsEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class PlaylistsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n uuid: str = field(repr=False)\n title: str\n number_of_tracks: int\n number_of_videos: int\n description: str\n created: Annotated[datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")]\n type: str\n public_playlist: bool\n url: str\n square_image: str # UUID v4" }, { "identifier": "TracksEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class TracksEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, tracks/{TRACKID} endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the audio track, including audio quality, track title, ISRC,\n track artists, album, track number, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int # seconds\n replay_gain: float = field(repr=False)\n peak: float = field(repr=False)\n track_number: int\n volume_number: int\n version: Optional[str]\n copyright: str = field(repr=False)\n url: str\n isrc: str = field(repr=False)\n explicit: bool\n audio_quality: str = field(repr=False)\n audio_modes: List[str] = field(repr=False)\n media_metadata: \"MediaMetadata\"\n artist: \"Artist\"\n artists: List[\"Artist\"]\n album: \"TrackAlbum\"\n\n def __post_init__(self):\n name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )\n self.name: str = name if self.version is None else f\"{name} ({self.version})\"" }, { "identifier": "VideosEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class VideosEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n volume_number: int\n track_number: int\n release_date: Annotated[\n datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n ]\n duration: int # seconds\n quality: str\n explicit: bool\n type: str\n artist: \"Artist\"\n artists: List[\"Artist\"]\n\n def __post_init__(self):\n self.name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )" }, { "identifier": "request_playlists", "path": "tidal_wave/requesting.py", "snippet": "def request_playlists(\n session: Session, identifier: int\n) -> Optional[PlaylistsEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"playlists\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=PlaylistsEndpointResponseJSON,\n )" }, { "identifier": "Track", "path": "tidal_wave/track.py", "snippet": "class Track:\n track_id: int\n\n def 
__post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None" }, { "identifier": "download_cover_image", "path": "tidal_wave/utils.py", "snippet": "def download_cover_image(\n session: Session,\n cover_uuid: str,\n output_dir: Path,\n file_name: str = \"cover.jpg\",\n dimension: Union[int, Tuple[int]] = 1280,\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as 'cover.jpeg' or 'cover.png'\n in the directory `path_to_output_dir`. 
Returns path to downloaded file\"\"\"\n cover_url_part: str = cover_uuid.replace(\"-\", \"/\")\n if isinstance(dimension, int):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension}x{dimension}\"\n elif isinstance(dimension, tuple):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension[0]}x{dimension[1]}\"\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n return output_file" }, { "identifier": "temporary_file", "path": "tidal_wave/utils.py", "snippet": "@contextmanager\ndef temporary_file(suffix: str = \".mka\"):\n \"\"\"This context-managed function is a stand-in for\n tempfile.NamedTemporaryFile as that stdlib object experiences\n errors on Windows.\"\"\"\n file_name: str = os.path.join(\n tempfile.gettempdir(), f\"{os.urandom(24).hex()}{suffix}\"\n )\n if not os.path.exists(file_name):\n open(file=file_name, mode=\"x\").close()\n\n tf = open(file=file_name, mode=\"wb\")\n try:\n yield tf\n finally:\n tf.close()\n os.unlink(tf.name)" }, { "identifier": "TIDAL_API_URL", "path": "tidal_wave/utils.py", "snippet": "TIDAL_API_URL: str = \"https://api.tidal.com/v1\"" }, { "identifier": "Video", "path": "tidal_wave/video.py", "snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. 
self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. 
Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})" } ]
from dataclasses import dataclass from pathlib import Path from types import SimpleNamespace from typing import Dict, List, Optional, Set, Tuple, Union from requests import HTTPError, Session from .media import AudioFormat from .models import ( PlaylistsEndpointResponseJSON, TracksEndpointResponseJSON, VideosEndpointResponseJSON, ) from .requesting import request_playlists from .track import Track from .utils import download_cover_image, temporary_file, TIDAL_API_URL from .video import Video import json import logging import math import shutil import sys import ffmpeg import mutagen
11626
for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory with temporary_file(suffix=".mp4") as tf: ffmpeg.input(file, hide_banner=None, y=None).output( tf.name, acodec="copy", vcodec="copy", loglevel="quiet", ).run() m = mutagen.File(tf.name) artist: str = m.get("\xa9ART", [""])[0] title: str = m.get("\xa9nam", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf else: return m3u_text def dumps(self): return json.dumps(self.files) def dump(self, fp=sys.stdout): json.dump(self.files, fp) def get(self, session: Session, audio_format: AudioFormat, out_dir: Path): """The main method of this class, executing a number of other methods in a row: - self.get_metadata() - self.set_items() - self.set_dir() - self.save_cover_image() - self.save_description() - self.get_items() - self.flatten_playlist_dir() """ self.get_metadata(session) if self.metadata is None: self.files = {} return self.set_items(session) self.set_dir(out_dir) self.save_cover_image(session, out_dir) try: self.save_description() except Exception: pass _get_items = self.get_items(session, audio_format) if _get_items is None: logger.critical(f"Could not retrieve playlist with ID '{self.playlist_id}'") return self.flatten_playlist_dir() try: m3u8_text: str = self.craft_m3u8_text() except Exception as e: logger.warning( "Unable to create playlist.m3u8 file for " f"playlist with ID '{self.playlist_id}'" ) logger.debug(e) else: with open(self.playlist_dir / "playlist.m3u8", "w") as f: f.write(m3u8_text) logger.info(f"Playlist files written to '{self.playlist_dir}'") class TidalPlaylistException(Exception): pass def request_playlist_items(session: Session, playlist_id: str) -> Optional[dict]: """Request from TIDAL API /playlists/items endpoint."""
logger = logging.getLogger("__name__") @dataclass class Playlist: playlist_id: str # UUID4 def __post_init__(self): self.playlist_dir: Optional[Path] = None self.playlist_cover_saved: bool = False def get_metadata(self, session: Session): """Request from TIDAL API /playlists endpoint""" self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists( session=session, identifier=self.playlist_id ) if self.metadata is None: return self.name = ( self.metadata.title.replace("/", "_") .replace("|", "_") .replace(":", " -") .replace('"', "") .replace("..", "") ) def set_items(self, session: Session): """Uses data from TIDAL API /playlists/items endpoint to populate self.items""" playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist( session=session, playlist_id=self.playlist_id ) if playlist_items is None: self.items = tuple() else: self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items) def set_dir(self, out_dir: Path): """Populates self.playlist_dir based on self.name, self.playlist_id""" playlist_substring: str = f"{self.name} [{self.playlist_id}]" self.playlist_dir: Path = out_dir / "Playlists" / playlist_substring self.playlist_dir.mkdir(parents=True, exist_ok=True) def save_cover_image(self, session: Session, out_dir: Path): """Requests self.metadata.image and attempts to write it to disk""" if self.playlist_dir is None: self.set_dir(out_dir=out_dir) self.cover_path: Path = self.playlist_dir / "cover.jpg" if not self.cover_path.exists(): download_cover_image( session=session, cover_uuid=self.metadata.square_image, output_dir=self.playlist_dir, dimension=1080, ) else: self.playlist_cover_saved = True def save_description(self): """Requests self.metadata.description and attempts to write it to disk""" description_path: Path = self.playlist_dir / "PlaylistDescription.txt" if self.metadata.description is not None and len(self.metadata.description) > 0: if not description_path.exists(): description_path.write_text(f"{self.metadata.description}\n") def get_items(self, session: Session, audio_format: AudioFormat): """Using either Track.get() or Video.get(), attempt to request the data for each track or video in self.items""" if len(self.items) == 0: return tracks_videos: list = [None] * len(self.items) for i, item in enumerate(self.items): if item is None: tracks_videos[i] = None continue elif isinstance(item, TracksEndpointResponseJSON): track: Track = Track(track_id=item.id) track.get( session=session, audio_format=audio_format, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = track elif isinstance(item, VideosEndpointResponseJSON): video: Video = Video(video_id=item.id) video.get( session=session, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = video else: tracks_videos[i] = None continue else: self.tracks_videos: Tuple[ Tuple[int, Optional[Union[Track, Video]]] ] = tuple(tracks_videos) return tracks_videos def flatten_playlist_dir(self): """When self.get_items() is called, the tracks and/or videos in self.items are downloaded using their self-contained .get() logic; this means that they will be downloaded to albums. 
This function "flattens" self.playlist_dir, meaning that it moves all downloaded audio and video files to self.playlist_dir, and removes the various subdirectories created""" files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos) if len(self.tracks_videos) == 0: return subdirs: Set[Path] = set() for i, tv in enumerate(self.tracks_videos, 1): if getattr(tv, "outfile") is None: try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) files[i - 1] = {i: None} continue _path: Optional[Path] = Path(tv.outfile) if tv is not None else None # if the item never got turned into a track or video if _path is None: files[i - 1] = {i: None} continue # if the track or video didn't download if _path.exists(): if _path.stat().st_size == 0: files[i - 1] = {i: None} continue else: files[i - 1] = {i: None} continue # otherwise, move files and clean up if isinstance(tv, Track): new_path: Path = self.playlist_dir / f"{i:03d} - {tv.trackname}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} elif isinstance(tv, Video): new_path: Path = self.playlist_dir / f"{i:03d} - {_path.name}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} else: self.files: List[Dict[int, Optional[str]]] = files # Find all subdirectories written to subdirs: Set[Path] = set() for tv in self.tracks_videos: if isinstance(tv, Track): try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) elif isinstance(tv, Video): subdirs.add(tv.artist_dir) # Copy all artist images, artist bio JSON files out # of subdirs artist_images: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*.jpg"): if p.name == "cover.jpg": continue artist_images.add(p) else: for artist_image_path in artist_images: if artist_image_path.exists(): shutil.copyfile( artist_image_path.absolute(), self.playlist_dir / artist_image_path.name, ) artist_bios: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*bio.json"): artist_bios.add(p) else: for artist_bio_path in artist_bios: if artist_bio_path.exists(): shutil.copyfile( artist_bio_path.absolute(), self.playlist_dir / artist_bio_path.name, ) # Remove all subdirs for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. 
the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory with temporary_file(suffix=".mp4") as tf: ffmpeg.input(file, hide_banner=None, y=None).output( tf.name, acodec="copy", vcodec="copy", loglevel="quiet", ).run() m = mutagen.File(tf.name) artist: str = m.get("\xa9ART", [""])[0] title: str = m.get("\xa9nam", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf else: return m3u_text def dumps(self): return json.dumps(self.files) def dump(self, fp=sys.stdout): json.dump(self.files, fp) def get(self, session: Session, audio_format: AudioFormat, out_dir: Path): """The main method of this class, executing a number of other methods in a row: - self.get_metadata() - self.set_items() - self.set_dir() - self.save_cover_image() - self.save_description() - self.get_items() - self.flatten_playlist_dir() """ self.get_metadata(session) if self.metadata is None: self.files = {} return self.set_items(session) self.set_dir(out_dir) self.save_cover_image(session, out_dir) try: self.save_description() except Exception: pass _get_items = self.get_items(session, audio_format) if _get_items is None: logger.critical(f"Could not retrieve playlist with ID '{self.playlist_id}'") return self.flatten_playlist_dir() try: m3u8_text: str = self.craft_m3u8_text() except Exception as e: logger.warning( "Unable to create playlist.m3u8 file for " f"playlist with ID '{self.playlist_id}'" ) logger.debug(e) else: with open(self.playlist_dir / "playlist.m3u8", "w") as f: f.write(m3u8_text) logger.info(f"Playlist files written to '{self.playlist_dir}'") class TidalPlaylistException(Exception): pass def request_playlist_items(session: Session, playlist_id: str) -> Optional[dict]: """Request from TIDAL API /playlists/items endpoint."""
url: str = f"{TIDAL_API_URL}/playlists/{playlist_id}/items"
8
2023-12-12 21:50:25+00:00
16k
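For orientation on what this record asks a model to complete: the next_line field above continues the body of request_playlist_items, whose signature and docstring close the code blobs. A minimal sketch of how such a helper might proceed from that line is given below; TIDAL_API_URL, the query parameters, and the error handling are illustrative assumptions, not the repository's actual implementation.

# Illustrative sketch only: TIDAL_API_URL, params, and error handling are assumed,
# not taken from the repository recorded above.
import logging
from typing import Optional

from requests import HTTPError, Session

TIDAL_API_URL = "https://api.tidal.com/v1"  # assumed base URL, for illustration only

logger = logging.getLogger(__name__)


def request_playlist_items(session: Session, playlist_id: str) -> Optional[dict]:
    """Request from TIDAL API /playlists/items endpoint."""
    url: str = f"{TIDAL_API_URL}/playlists/{playlist_id}/items"  # the record's next_line
    try:
        resp = session.get(url, params={"limit": 100, "offset": 0})
        resp.raise_for_status()
    except HTTPError as exc:
        logger.warning(f"Could not retrieve items for playlist '{playlist_id}': {exc}")
        return None
    return resp.json()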
Deltares/imod-python
imod/mf6/npf.py
[ { "identifier": "Package", "path": "imod/mf6/package.py", "snippet": "class Package(PackageBase, abc.ABC):\n \"\"\"\n Package is used to share methods for specific packages with no time\n component.\n\n It is not meant to be used directly, only to inherit from, to implement new\n packages.\n\n This class only supports `array input\n <https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=16>`_,\n not the list input which is used in :class:`BoundaryCondition`.\n \"\"\"\n\n _pkg_id = \"\"\n _init_schemata = {}\n _write_schemata = {}\n\n def __init__(self, allargs=None):\n super().__init__(allargs)\n\n def isel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.isel instead.\"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.isel(**selection))\"\n )\n\n def sel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.sel instead. \"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.sel(**selection))\"\n )\n\n def _valid(self, value):\n \"\"\"\n Filters values that are None, False, or a numpy.bool_ False.\n Needs to be this specific, since 0.0 and 0 are valid values, but are\n equal to a boolean False.\n \"\"\"\n # Test singletons\n if value is False or value is None:\n return False\n # Test numpy bool (not singleton)\n elif isinstance(value, np.bool_) and not value:\n return False\n # When dumping to netCDF and reading back, None will have been\n # converted into a NaN. Only check NaN if it's a floating type to avoid\n # TypeErrors.\n elif np.issubdtype(type(value), np.floating) and np.isnan(value):\n return False\n else:\n return True\n\n @staticmethod\n def _number_format(dtype: type):\n if np.issubdtype(dtype, np.integer):\n return \"%i\"\n elif np.issubdtype(dtype, np.floating):\n return \"%.18G\"\n else:\n raise TypeError(\"dtype should be either integer or float\")\n\n @staticmethod\n def _initialize_template(pkg_id):\n loader = jinja2.PackageLoader(\"imod\", \"templates/mf6\")\n env = jinja2.Environment(loader=loader, keep_trailing_newline=True)\n if pkg_id == \"ims\":\n fname = \"sln-ims.j2\"\n elif pkg_id == \"tdis\":\n fname = \"sim-tdis.j2\"\n elif pkg_id in TRANSPORT_PACKAGES:\n fname = f\"gwt-{pkg_id}.j2\"\n elif pkg_id in EXCHANGE_PACKAGES:\n fname = f\"exg-{pkg_id}.j2\"\n else:\n fname = f\"gwf-{pkg_id}.j2\"\n return env.get_template(fname)\n\n def write_blockfile(self, pkgname, globaltimes, write_context: WriteContext):\n directory = write_context.get_formatted_write_directory()\n\n content = self.render(\n directory=directory,\n pkgname=pkgname,\n globaltimes=globaltimes,\n binary=write_context.use_binary,\n )\n filename = write_context.write_directory / f\"{pkgname}.{self._pkg_id}\"\n with open(filename, \"w\") as f:\n f.write(content)\n\n def write_binary_griddata(self, outpath, da, dtype):\n # From the modflow6 source, the header is defined as:\n # integer(I4B) :: kstp --> np.int32 : 1\n # integer(I4B) :: kper --> np.int32 : 2\n # real(DP) :: pertim --> 2 * np.int32 : 4\n # real(DP) :: totim --> 2 * np.int32 : 6\n # character(len=16) :: text --> 4 * np.int32 : 10\n # integer(I4B) :: m1, m2, m3 --> 3 * np.int32 : 13\n # so writing 13 bytes suffices to create a header.\n\n # The following code is commented out due to 
modflow issue 189\n # https://github.com/MODFLOW-USGS/modflow6/issues/189\n # We never write LAYERED data.\n # The (structured) dis array reader results in an error if you try to\n # read a 3D botm array. By storing nlayer * nrow * ncol in the first\n # header entry, the array is read properly.\n\n # haslayer = \"layer\" in da.dims\n # if haslayer:\n # nlayer, nrow, ncol = da.shape\n # else:\n # nrow, ncol = da.shape\n # nlayer = 1\n\n # This is a work around for the abovementioned issue.\n nval = np.product(da.shape)\n header = np.zeros(13, np.int32)\n header[-3] = np.int32(nval) # ncol\n header[-2] = np.int32(1) # nrow\n header[-1] = np.int32(1) # nlayer\n\n with open(outpath, \"w\") as f:\n header.tofile(f)\n da.values.flatten().astype(dtype).tofile(f)\n\n def write_text_griddata(self, outpath, da, dtype):\n with open(outpath, \"w\") as f:\n # Note: reshaping here avoids writing newlines after every number.\n # This dumps all the values in a single row rather than a single\n # column. This is to be preferred, since editors can easily\n # \"reshape\" a long row with \"word wrap\"; they cannot as easily\n # ignore newlines.\n fmt = self._number_format(dtype)\n data = da.values\n if data.ndim > 2:\n np.savetxt(fname=f, X=da.values.reshape((1, -1)), fmt=fmt)\n else:\n np.savetxt(fname=f, X=da.values, fmt=fmt)\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = {}\n if directory is None:\n pkg_directory = pkgname\n else:\n pkg_directory = pathlib.Path(directory) / pkgname\n\n for varname in self.dataset.data_vars:\n key = self._keyword_map.get(varname, varname)\n\n if hasattr(self, \"_grid_data\") and varname in self._grid_data:\n layered, value = self._compose_values(\n self.dataset[varname], pkg_directory, key, binary=binary\n )\n if self._valid(value): # skip False or None\n d[f\"{key}_layered\"], d[key] = layered, value\n else:\n value = self[varname].values[()]\n if self._valid(value): # skip False or None\n d[key] = value\n\n if (hasattr(self, \"_auxiliary_data\")) and (names := get_variable_names(self)):\n d[\"auxiliary\"] = names\n\n return self._template.render(d)\n\n @staticmethod\n def _is_xy_data(obj):\n if isinstance(obj, (xr.DataArray, xr.Dataset)):\n xy = \"x\" in obj.dims and \"y\" in obj.dims\n elif isinstance(obj, (xu.UgridDataArray, xu.UgridDataset)):\n xy = obj.ugrid.grid.face_dimension in obj.dims\n else:\n raise TypeError(\n \"obj should be DataArray or UgridDataArray, \"\n f\"received {type(obj)} instead\"\n )\n return xy\n\n def _compose_values(self, da, directory, name, binary):\n \"\"\"\n Compose values of dictionary.\n\n Ignores times. 
Time dependent boundary conditions use the method from\n BoundaryCondition.\n\n See documentation of wq\n \"\"\"\n layered = False\n values = []\n if self._is_xy_data(da):\n if binary:\n path = (directory / f\"{name}.bin\").as_posix()\n values.append(f\"open/close {path} (binary)\")\n else:\n path = (directory / f\"{name}.dat\").as_posix()\n values.append(f\"open/close {path}\")\n else:\n if \"layer\" in da.dims:\n layered = True\n for layer in da.coords[\"layer\"]:\n values.append(f\"constant {da.sel(layer=layer).values[()]}\")\n else:\n value = da.values[()]\n if self._valid(value): # skip None or False\n values.append(f\"constant {value}\")\n else:\n values = None\n\n return layered, values\n\n def write(\n self,\n pkgname: str,\n globaltimes: Union[List, np.ndarray],\n write_context: WriteContext,\n ):\n directory = write_context.write_directory\n binary = write_context.use_binary\n self.write_blockfile(pkgname, globaltimes, write_context)\n\n if hasattr(self, \"_grid_data\"):\n if self._is_xy_data(self.dataset):\n pkgdirectory = directory / pkgname\n pkgdirectory.mkdir(exist_ok=True, parents=True)\n for varname, dtype in self._grid_data.items():\n key = self._keyword_map.get(varname, varname)\n da = self.dataset[varname]\n if self._is_xy_data(da):\n if binary:\n path = pkgdirectory / f\"{key}.bin\"\n self.write_binary_griddata(path, da, dtype)\n else:\n path = pkgdirectory / f\"{key}.dat\"\n self.write_text_griddata(path, da, dtype)\n\n def _validate(self, schemata: Dict, **kwargs) -> Dict[str, List[ValidationError]]:\n errors = defaultdict(list)\n for variable, var_schemata in schemata.items():\n for schema in var_schemata:\n if (\n variable in self.dataset.keys()\n ): # concentration only added to dataset if specified\n try:\n schema.validate(self.dataset[variable], **kwargs)\n except ValidationError as e:\n errors[variable].append(e)\n return errors\n\n def is_empty(self) -> bool:\n \"\"\"\n Returns True if the package is empty- for example if it contains only no-data values.\n \"\"\"\n\n # Create schemata dict only containing the\n # variables with a AllNoDataSchema and EmptyIndexesSchema (in case of\n # HFB) in the write schemata.\n allnodata_schemata = filter_schemata_dict(\n self._write_schemata, (AllNoDataSchema, EmptyIndexesSchema)\n )\n\n # Find if packages throws ValidationError for AllNoDataSchema or\n # EmptyIndexesSchema.\n allnodata_errors = self._validate(allnodata_schemata)\n return len(allnodata_errors) > 0\n\n def _validate_init_schemata(self, validate: bool):\n \"\"\"\n Run the \"cheap\" schema validations.\n\n The expensive validations are run during writing. Some are only\n available then: e.g. 
idomain to determine active part of domain.\n \"\"\"\n if not validate:\n return\n errors = self._validate(self._init_schemata)\n if len(errors) > 0:\n message = validation_pkg_error_message(errors)\n raise ValidationError(message)\n return\n\n def _get_vars_to_check(self):\n \"\"\"\n Helper function to get all variables which were not set to None\n \"\"\"\n variables = []\n for var in self._metadata_dict.keys():\n if ( # Filter optional variables not filled in\n self.dataset[var].size != 1\n ) or (\n self.dataset[var] != None # noqa: E711\n ):\n variables.append(var)\n\n return variables\n\n def copy(self) -> Any:\n # All state should be contained in the dataset.\n return type(self)(**self.dataset.copy())\n\n @staticmethod\n def _clip_repeat_stress(\n repeat_stress: xr.DataArray,\n time,\n time_start,\n time_end,\n ):\n \"\"\"\n Selection may remove the original data which are repeated.\n These should be re-inserted at the first occuring \"key\".\n Next, remove these keys as they've been \"promoted\" to regular\n timestamps with data.\n \"\"\"\n # First, \"pop\" and filter.\n keys, values = repeat_stress.values.T\n keep = (keys >= time_start) & (keys <= time_end)\n new_keys = keys[keep]\n new_values = values[keep]\n # Now detect which \"value\" entries have gone missing\n insert_values, index = np.unique(new_values, return_index=True)\n insert_keys = new_keys[index]\n # Setup indexer\n indexer = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n ).sel(time=insert_values)\n indexer[\"time\"] = insert_keys\n\n # Update the key-value pairs. Discard keys that have been \"promoted\".\n keep = np.in1d(new_keys, insert_keys, assume_unique=True, invert=True)\n new_keys = new_keys[keep]\n new_values = new_values[keep]\n # Set the values to their new source.\n new_values = insert_keys[np.searchsorted(insert_values, new_values)]\n repeat_stress = xr.DataArray(\n data=np.column_stack((new_keys, new_values)),\n dims=(\"repeat\", \"repeat_items\"),\n )\n return indexer, repeat_stress\n\n @staticmethod\n def _clip_time_indexer(\n time,\n time_start,\n time_end,\n ):\n original = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n )\n indexer = original.sel(time=slice(time_start, time_end))\n\n # The selection might return a 0-sized dimension.\n if indexer.size > 0:\n first_time = indexer[\"time\"].values[0]\n else:\n first_time = None\n\n # If the first time matches exactly, xarray will have done thing we\n # wanted and our work with the time dimension is finished.\n if (time_start is not None) and (time_start != first_time):\n # If the first time is before the original time, we need to\n # backfill; otherwise, we need to ffill the first timestamp.\n if time_start < time[0]:\n method = \"bfill\"\n else:\n method = \"ffill\"\n # Index with a list rather than a scalar to preserve the time\n # dimension.\n first = original.sel(time=[time_start], method=method)\n first[\"time\"] = [time_start]\n indexer = xr.concat([first, indexer], dim=\"time\")\n\n return indexer\n\n def __to_datetime(self, time, use_cftime):\n \"\"\"\n Helper function that converts to datetime, except when None.\n \"\"\"\n if time is None:\n return time\n else:\n return imod.wq.timeutil.to_datetime(time, use_cftime)\n\n def clip_box(\n self,\n time_min=None,\n time_max=None,\n layer_min=None,\n layer_max=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None,\n state_for_boundary=None,\n ) -> \"Package\":\n \"\"\"\n Clip a package by a bounding box 
(time, layer, y, x).\n\n Slicing intervals may be half-bounded, by providing None:\n\n * To select 500.0 <= x <= 1000.0:\n ``clip_box(x_min=500.0, x_max=1000.0)``.\n * To select x <= 1000.0: ``clip_box(x_min=None, x_max=1000.0)``\n or ``clip_box(x_max=1000.0)``.\n * To select x >= 500.0: ``clip_box(x_min = 500.0, x_max=None.0)``\n or ``clip_box(x_min=1000.0)``.\n\n Parameters\n ----------\n time_min: optional\n time_max: optional\n layer_min: optional, int\n layer_max: optional, int\n x_min: optional, float\n x_max: optional, float\n y_min: optional, float\n y_max: optional, float\n\n Returns\n -------\n clipped: Package\n \"\"\"\n selection = self.dataset\n if \"time\" in selection:\n time = selection[\"time\"].values\n use_cftime = isinstance(time[0], cftime.datetime)\n time_start = self.__to_datetime(time_min, use_cftime)\n time_end = self.__to_datetime(time_max, use_cftime)\n\n indexer = self._clip_time_indexer(\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n\n if \"repeat_stress\" in selection.data_vars and self._valid(\n selection[\"repeat_stress\"].values[()]\n ):\n repeat_indexer, repeat_stress = self._clip_repeat_stress(\n repeat_stress=selection[\"repeat_stress\"],\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n selection = selection.drop_vars(\"repeat_stress\")\n selection[\"repeat_stress\"] = repeat_stress\n indexer = repeat_indexer.combine_first(indexer).astype(int)\n\n selection = selection.drop_vars(\"time\").isel(time=indexer)\n\n if \"layer\" in selection.coords:\n layer_slice = slice(layer_min, layer_max)\n # Cannot select if it's not a dimension!\n if \"layer\" not in selection.dims:\n selection = (\n selection.expand_dims(\"layer\")\n .sel(layer=layer_slice)\n .squeeze(\"layer\")\n )\n else:\n selection = selection.sel(layer=layer_slice)\n\n x_slice = slice(x_min, x_max)\n y_slice = slice(y_min, y_max)\n if isinstance(selection, xu.UgridDataset):\n selection = selection.ugrid.sel(x=x_slice, y=y_slice)\n elif (\"x\" in selection.coords) and (\"y\" in selection.coords):\n if selection.indexes[\"y\"].is_monotonic_decreasing:\n y_slice = slice(y_max, y_min)\n selection = selection.sel(x=x_slice, y=y_slice)\n\n cls = type(self)\n new = cls.__new__(cls)\n new.dataset = selection\n return new\n\n def mask(self, domain: GridDataArray) -> Any:\n \"\"\"\n Mask values outside of domain.\n\n Floating values outside of the condition are set to NaN (nodata).\n Integer values outside of the condition are set to 0 (inactive in\n MODFLOW terms).\n\n Parameters\n ----------\n domain: xr.DataArray of integers. Preservers values where domain is larger than 0.\n\n Returns\n -------\n masked: Package\n The package with part masked.\n \"\"\"\n masked = {}\n for var in self.dataset.data_vars.keys():\n da = self.dataset[var]\n if self.skip_masking_dataarray(var):\n masked[var] = da\n continue\n if set(domain.dims).issubset(da.dims):\n if issubclass(da.dtype.type, numbers.Integral):\n masked[var] = da.where(domain > 0, other=0)\n elif issubclass(da.dtype.type, numbers.Real):\n masked[var] = da.where(domain > 0)\n else:\n raise TypeError(\n f\"Expected dtype float or integer. 
Received instead: {da.dtype}\"\n )\n else:\n if da.values[()] is not None:\n if is_scalar(da.values[()]):\n masked[var] = da.values[()] # For scalars, such as options\n else:\n masked[\n var\n ] = da # For example for arrays with only a layer dimension\n else:\n masked[var] = None\n\n return type(self)(**masked)\n\n def is_regridding_supported(self) -> bool:\n \"\"\"\n returns true if package supports regridding.\n \"\"\"\n return hasattr(self, \"_regrid_method\")\n\n def get_regrid_methods(self) -> Optional[Dict[str, Tuple[RegridderType, str]]]:\n if self.is_regridding_supported():\n return self._regrid_method\n return None\n\n def _regrid_array(\n self,\n varname: str,\n regridder_collection: RegridderInstancesCollection,\n regridder_name: str,\n regridder_function: str,\n target_grid: GridDataArray,\n ) -> Optional[GridDataArray]:\n \"\"\"\n Regrids a data_array. The array is specified by its key in the dataset.\n Each data-array can represent:\n -a scalar value, valid for the whole grid\n -an array of a different scalar per layer\n -an array with a value per grid block\n -None\n \"\"\"\n\n # skip regridding for arrays with no valid values (such as \"None\")\n if not self._valid(self.dataset[varname].values[()]):\n return None\n\n # the dataarray might be a scalar. If it is, then it does not need regridding.\n if is_scalar(self.dataset[varname]):\n return self.dataset[varname].values[()]\n\n if isinstance(self.dataset[varname], xr.DataArray):\n coords = self.dataset[varname].coords\n # if it is an xr.DataArray it may be layer-based; then no regridding is needed\n if not (\"x\" in coords and \"y\" in coords):\n return self.dataset[varname]\n\n # if it is an xr.DataArray it needs the dx, dy coordinates for regridding, which are otherwise not mandatory\n if not (\"dx\" in coords and \"dy\" in coords):\n raise ValueError(\n f\"DataArray {varname} does not have both a dx and dy coordinates\"\n )\n\n # obtain an instance of a regridder for the chosen method\n regridder = regridder_collection.get_regridder(\n regridder_name,\n regridder_function,\n )\n\n # store original dtype of data\n original_dtype = self.dataset[varname].dtype\n\n # regrid data array\n regridded_array = regridder.regrid(self.dataset[varname])\n\n # reconvert the result to the same dtype as the original\n return regridded_array.astype(original_dtype)\n\n def regrid_like(\n self,\n target_grid: GridDataArray,\n regridder_types: Dict[str, Tuple[RegridderType, str]] = None,\n ) -> \"Package\":\n \"\"\"\n Creates a package of the same type as this package, based on another discretization.\n It regrids all the arrays in this package to the desired discretization, and leaves the options\n unmodified. At the moment only regridding to a different planar grid is supported, meaning\n ``target_grid`` has different ``\"x\"`` and ``\"y\"`` or different ``cell2d`` coords.\n\n The regridding methods can be specified in the _regrid_method attribute of the package. These are the defaults\n that specify how each array should be regridded. 
These defaults can be overridden using the input\n parameters of this function.\n\n Examples\n --------\n To regrid the npf package with a non-default method for the k-field, call regrid_like with these arguments:\n\n >>> new_npf = npf.regrid_like(like, {\"k\": (imod.RegridderType.OVERLAP, \"mean\")})\n\n\n Parameters\n ----------\n target_grid: xr.DataArray or xu.UgridDataArray\n a grid defined over the same discretization as the one we want to regrid the package to\n regridder_types: dict(str->(regridder type,str))\n dictionary mapping arraynames (str) to a tuple of regrid type (a specialization class of BaseRegridder) and function name (str)\n this dictionary can be used to override the default mapping method.\n\n Returns\n -------\n a package with the same options as this package, and with all the data-arrays regridded to another discretization,\n similar to the one used in input argument \"target_grid\"\n \"\"\"\n if not self.is_regridding_supported():\n raise NotImplementedError(\n f\"Package {type(self).__name__} does not support regridding\"\n )\n\n regridder_collection = RegridderInstancesCollection(\n self.dataset, target_grid=target_grid\n )\n\n regridder_settings = copy.deepcopy(self._regrid_method)\n if regridder_types is not None:\n regridder_settings.update(regridder_types)\n\n new_package_data = get_non_grid_data(self, list(regridder_settings.keys()))\n\n for (\n varname,\n regridder_type_and_function,\n ) in regridder_settings.items():\n regridder_name, regridder_function = regridder_type_and_function\n\n # skip variables that are not in this dataset\n if varname not in self.dataset.keys():\n continue\n\n # regrid the variable\n new_package_data[varname] = self._regrid_array(\n varname,\n regridder_collection,\n regridder_name,\n regridder_function,\n target_grid,\n )\n\n new_package = self.__class__(**new_package_data)\n\n return new_package\n\n def skip_masking_dataarray(self, array_name: str) -> bool:\n if hasattr(self, \"_skip_mask_arrays\"):\n return array_name in self._skip_mask_arrays\n return False\n\n @classmethod\n def is_grid_agnostic_package(cls) -> bool:\n return False\n\n def __repr__(self) -> str:\n typename = type(self).__name__\n return f\"{typename}\\n{self.dataset.__repr__()}\"\n\n def _repr_html_(self) -> str:\n typename = type(self).__name__\n return f\"<div>{typename}</div>{self.dataset._repr_html_()}\"" }, { "identifier": "RegridderType", "path": "imod/mf6/regridding_utils.py", "snippet": "class RegridderType(Enum):\n \"\"\"\n Enumerator referring to regridder types in ``xugrid``.\n These can be used safely in scripts, remaining backwards compatible for\n when it is decided to rename regridders in ``xugrid``. For an explanation\n what each regridder type does, we refer to the `xugrid documentation <https://deltares.github.io/xugrid/examples/regridder_overview.html>`_\n \"\"\"\n\n CENTROIDLOCATOR = xu.CentroidLocatorRegridder\n BARYCENTRIC = xu.BarycentricInterpolator\n OVERLAP = xu.OverlapRegridder\n RELATIVEOVERLAP = xu.RelativeOverlapRegridder" }, { "identifier": "PKG_DIMS_SCHEMA", "path": "imod/mf6/validation.py", "snippet": "PKG_DIMS_SCHEMA = (\n DimsSchema(\"layer\", \"y\", \"x\")\n | DimsSchema(\"layer\", \"{face_dim}\")\n | DimsSchema(\"layer\")\n | DimsSchema()\n)" }, { "identifier": "AllValueSchema", "path": "imod/schemata.py", "snippet": "class AllValueSchema(ValueSchema):\n \"\"\"\n Validate whether all values pass a condition.\n\n E.g. 
if operator is \">\":\n\n assert (values > threshold).all()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n np.isnan(obj) | np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to True\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.all():\n raise ValidationError(\n f\"not all values comply with criterion: {self.operator_str} {self.other}\"\n )" }, { "identifier": "DTypeSchema", "path": "imod/schemata.py", "snippet": "class DTypeSchema(BaseSchema):\n def __init__(self, dtype: DTypeLike) -> None:\n if dtype in [\n np.floating,\n np.integer,\n np.signedinteger,\n np.unsignedinteger,\n np.generic,\n ]:\n self.dtype = dtype\n else:\n self.dtype = np.dtype(dtype)\n\n def validate(self, obj: xr.DataArray, **kwargs) -> None:\n \"\"\"\n Validate dtype\n\n Parameters\n ----------\n dtype : Any\n Dtype of the DataArray.\n \"\"\"\n if scalar_None(obj):\n return\n\n if not np.issubdtype(obj.dtype, self.dtype):\n raise ValidationError(f\"dtype {obj.dtype} != {self.dtype}\")" }, { "identifier": "IdentityNoDataSchema", "path": "imod/schemata.py", "snippet": "class IdentityNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that the NoData values are located at exactly the same locations.\n\n Tests only if if all dimensions of the other object are present in the\n object. So tests if \"stage\" with `{time, layer, y, x}` compared to \"idomain\"\n `{layer, y, x}` but doesn't test if \"k\" with `{layer}` is comperated to\n \"idomain\" `{layer, y, x}`\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n\n # Only test if object has all dimensions in other object.\n missing_dims = set(other_obj.dims) - set(obj.dims)\n\n if len(missing_dims) == 0:\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n if (valid ^ other_valid).any():\n raise ValidationError(f\"nodata is not aligned with {self.other}\")" }, { "identifier": "IndexesSchema", "path": "imod/schemata.py", "snippet": "class IndexesSchema(EmptyIndexesSchema):\n \"\"\"\n Verify indexes, check if no dims with zero size are included and that\n indexes are monotonic. Skips unstructured grid dimensions.\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n # Test if indexes all empty\n super().validate(obj)\n\n dims_to_validate = self.get_dims_to_validate(obj)\n\n for dim in dims_to_validate:\n if dim == \"y\":\n if not obj.indexes[dim].is_monotonic_decreasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically decreasing\"\n )\n\n else:\n if not obj.indexes[dim].is_monotonic_increasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically increasing\"\n )" } ]
import warnings

import numpy as np

from imod.mf6.package import Package
from imod.mf6.regridding_utils import RegridderType
from imod.mf6.validation import PKG_DIMS_SCHEMA
from imod.schemata import (
    AllValueSchema,
    DTypeSchema,
    IdentityNoDataSchema,
    IndexesSchema,
)
11,181
Default is False. save_saturation: ({True, False}, optional) keyword to indicate that cell saturation will be written to the budget file, which is specified with "BUDGET SAVE FILE" in Output Control. Saturation will be saved to the budget file as an auxiliary variable saved with the DATA-SAT text label. Saturation is a cell variable that ranges from zero to one and can be used by post processing programs to determine how much of a cell volume is saturated. If ICELLTYPE is 0, then saturation is always one. xt3d_option: ({True, False}, optional) If True, the XT3D formulation will be used. By default False. rhs_option: ({True, False}, optional) If True, then the XT3D additional terms will be added to the right-hand side. If False, then the XT3D terms will be put into the coefficient matrix. By default False. validate: {True, False} Flag to indicate whether the package should be validated upon initialization. This raises a ValidationError if package input is provided in the wrong manner. Defaults to True. """ _pkg_id = "npf" _init_schemata = { "icelltype": [ DTypeSchema(np.integer), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "rewet_layer": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k22": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k33": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle1": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle2": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle3": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "alternative_cell_averaging": [DTypeSchema(str)], "save_flows": [DTypeSchema(np.bool_)], "starting_head_as_confined_thickness": [DTypeSchema(np.bool_)], "variable_vertical_conductance": [DTypeSchema(np.bool_)], "dewatered": [DTypeSchema(np.bool_)], "perched": [DTypeSchema(np.bool_)], "save_specific_discharge": [DTypeSchema(np.bool_)], } _write_schemata = { "k": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ), "rewet_layer": ( IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ), "k22": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), # No need to check coords: dataset ensures they align with idomain. ), "k33": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), # No need to check coords: dataset ensures they align with idomain. ), "angle1": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), "angle2": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), "angle3": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), } _grid_data = { "icelltype": np.int32, "k": np.float64, "rewet_layer": np.float64, "k22": np.float64, "k33": np.float64, "angle1": np.float64, "angle2": np.float64, "angle3": np.float64, } _keyword_map = { "rewet": "rewet_record", "rewet_factor": "wetfct", "rewet_method": "ihdwet", "rewet_layer": "wetdry", "variable_vertical_conductance": "variablecv", "starting_head_as_confined_thickness": "thickstrt", "rewet_iterations": "iwetit", "xt3d_option": "xt3doptions", "rhs_option": "rhs", } _template = Package._initialize_template(_pkg_id) _regrid_method = {
class NodePropertyFlow(Package): """ Node Property Flow package. In this package the hydraulic conductivity and rewetting in the model is specified. A single NPF Package is required for each GWF model. https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=51 A note about regridding: the fields k, k22, k33 define the principal components of an anisotropic conductivity tensor. By default, k and k22 are in the horizontal plane and k33 is vertical. Angle1, angle2 and angle3 define the rotation of this tensor. The regridding methods associated by default are chosen based on the assumption that k and k22 are horizontal and k33 is vertical. If this is not the case, it is up to the user to regrid the npf package using other regridding methods. This may be recommended if for example the rotation is such that k has become vertical and k33 horizontal. Parameters ---------- icelltype: array of int (xr.DataArray) flag for each cell that specifies how saturated thickness is treated. 0 means saturated thickness is held constant; >0 means saturated thickness varies with computed head when head is below the cell top; <0 means saturated thickness varies with computed head unless the starting_head_as_confined_thickness option is in effect. When starting_head_as_confined_thickness is in effect, a negative value of icelltype indicates that saturated thickness will be computed as strt-bot and held constant. k: array of floats (xr.DataArray) is the hydraulic conductivity. For the common case in which the user would like to specify the horizontal hydraulic conductivity and the vertical hydraulic conductivity, then K should be assigned as the horizontal hydraulic conductivity, K33 should be assigned as the vertical hydraulic conductivity, and K22 and the three rotation angles should not be specified. When more sophisticated anisotropy is required, then K corresponds to the K11 hydraulic conductivity axis. All included cells (idomain > 0) must have a K value greater than zero rewet: ({True, False}, optional) activates model rewetting. Default is False. rewet_layer: float is a combination of the wetting threshold and a flag to indicate which neighboring cells can cause a cell to become wet. If rewet_layer < 0, only a cell below a dry cell can cause the cell to become wet. If rewet_layer > 0, the cell below a dry cell and horizontally adjacent cells can cause a cell to become wet. If rewet_layer is 0, the cell cannot be wetted. The absolute value of rewet_layer is the wetting threshold. When the sum of BOT and the absolute value of rewet_layer at a dry cell is equaled or exceeded by the head at an adjacent cell, the cell is wetted. rewet_layer must be specified if "rewet" is specified in the OPTIONS block. If "rewet" is not specified in the options block, then rewet_layer can be entered, and memory will be allocated for it, even though it is not used. (WETDRY) Default is None. rewet_factor: is a keyword and factor that is included in the calculation of the head that is initially established at a cell when that cell is converted from dry to wet. (WETFCT) Default is None. rewet_iterations: is a keyword and iteration interval for attempting to wet cells. Wetting is attempted every rewet_iterations iteration. This applies to outer iterations and not inner iterations. If rewet_iterations is specified as zero or less, then the value is changed to 1. (IWETIT) Default is None. 
rewet_method: is a keyword and integer flag that determines which equation is used to define the initial head at cells that become wet. If rewet_method is 0, h = BOT + rewet_factor (hm - BOT). If rewet_method is not 0, h = BOT + rewet_factor (THRESH). (IHDWET) Default is None. k22: array of floats (xr.DataArray) is the hydraulic conductivity of the second ellipsoid axis; for an unrotated case this is the hydraulic conductivity in the y direction. If K22 is not included, then K22 is set equal to K. For a regular MODFLOW grid (DIS Package is used) in which no rotation angles are specified, K22 is the hydraulic conductivity along columns in the y direction. For an unstructured DISU grid, the user must assign principal x and y axes and provide the angle for each cell face relative to the assigned x direction. All included cells (idomain > 0) must have a K22 value greater than zero. Default is None. k33: array of floats (xr.DataArray) is the hydraulic conductivity of the third ellipsoid axis; for an unrotated case, this is the vertical hydraulic conductivity. When anisotropy is applied, K33 corresponds to the K33 tensor component. All included cells (idomain > 0) must have a K33 value greater than zero. Default is None. angle1: float is a rotation angle of the hydraulic conductivity tensor in degrees. The angle represents the first of three sequential rotations of the hydraulic conductivity ellipsoid. With the K11, K22, and K33 axes of the ellipsoid initially aligned with the x, y, and z coordinate axes, respectively, angle1 rotates the ellipsoid about its K33 axis (within the x - y plane). A positive value represents counter-clockwise rotation when viewed from any point on the positive K33 axis, looking toward the center of the ellipsoid. A value of zero indicates that the K11 axis lies within the x - z plane. If angle1 is not specified, default values of zero are assigned to angle1, angle2, and angle3, in which case the K11, K22, and K33 axes are aligned with the x, y, and z axes, respectively. Default is None. angle2: float is a rotation angle of the hydraulic conductivity tensor in degrees. The angle represents the second of three sequential rotations of the hydraulic conductivity ellipsoid. Following the rotation by angle1 described above, angle2 rotates the ellipsoid about its K22 axis (out of the x - y plane). An array can be specified for angle2 only if angle1 is also specified. A positive value of angle2 represents clockwise rotation when viewed from any point on the positive K22 axis, looking toward the center of the ellipsoid. A value of zero indicates that the K11 axis lies within the x - y plane. If angle2 is not specified, default values of zero are assigned to angle2 and angle3; connections that are not user-designated as vertical are assumed to be strictly horizontal (that is, to have no z component to their orientation); and connection lengths are based on horizontal distances. Default is None. angle3: float is a rotation angle of the hydraulic conductivity tensor in degrees. The angle represents the third of three sequential rotations of the hydraulic conductivity ellipsoid. Following the rotations by angle1 and angle2 described above, angle3 rotates the ellipsoid about its K11 axis. An array can be specified for angle3 only if angle1 and angle2 are also specified. An array must be specified for angle3 if angle2 is specified. A positive value of angle3 represents clockwise rotation when viewed from any point on the positive K11 axis, looking toward the center of the ellipsoid. 
A value of zero indicates that the K22 axis lies within the x - y plane. Default is None. alternative_cell_averaging : str Method calculating horizontal cell connection conductance. Options: {"LOGARITHMIC", "AMT-LMK", or "AMT-HMK"} Default: uses harmonic mean for averaging save_flows: ({True, False}, optional) keyword to indicate that cell-by-cell flow terms will be written to the file specified with "budget save file" in Output Control. Default is False. starting_head_as_confined_thickness: ({True, False}, optional) indicates that cells having a negative icelltype are confined, and their cell thickness for conductance calculations will be computed as strt-bot rather than top-bot. (THICKSTRT) Default is False. variable_vertical_conductance: ({True, False}, optional) keyword to indicate that the vertical conductance will be calculated using the saturated thickness and properties of the overlying cell and the thickness and properties of the underlying cell. if the dewatered keyword is also specified, then the vertical conductance is calculated using only the saturated thickness and properties of the overlying cell if the head in the underlying cell is below its top. if these keywords are not specified, then the default condition is to calculate the vertical conductance at the start of the simulation using the initial head and the cell properties. the vertical conductance remains constant for the entire simulation. (VARIABLECV) Default is False. dewatered: ({True, False}, optional) If the dewatered keyword is specified, then the vertical conductance is calculated using only the saturated thickness and properties of the overlying cell if the head in the underlying cell is below its top. Default is False. perched: ({True, False}, optional) keyword to indicate that when a cell is overlying a dewatered convertible cell, the head difference used in Darcy’s Law is equal to the head in the overlying cell minus the bottom elevation of the overlying cell. If not specified, then the default is to use the head difference between the two cells. Default is False. save_specific_discharge: ({True, False}, optional) keyword to indicate that x, y, and z components of specific discharge will be calculated at cell centers and written to the cell-by-cell flow file, which is specified with"budget save file" in Output Control. If this option is activated, then additional information may be required in the discretization packages and the GWF Exchange package (if GWF models are coupled). Specifically, angldegx must be specified in the connectiondata block of the disu package; angldegx must also be specified for the GWF Exchange as an auxiliary variable. disu package has not been implemented yet. Default is False. save_saturation: ({True, False}, optional) keyword to indicate that cell saturation will be written to the budget file, which is specified with "BUDGET SAVE FILE" in Output Control. Saturation will be saved to the budget file as an auxiliary variable saved with the DATA-SAT text label. Saturation is a cell variable that ranges from zero to one and can be used by post processing programs to determine how much of a cell volume is saturated. If ICELLTYPE is 0, then saturation is always one. xt3d_option: ({True, False}, optional) If True, the XT3D formulation will be used. By default False. rhs_option: ({True, False}, optional) If True, then the XT3D additional terms will be added to the right-hand side. If False, then the XT3D terms will be put into the coefficient matrix. By default False. 
validate: {True, False} Flag to indicate whether the package should be validated upon initialization. This raises a ValidationError if package input is provided in the wrong manner. Defaults to True. """ _pkg_id = "npf" _init_schemata = { "icelltype": [ DTypeSchema(np.integer), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "rewet_layer": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k22": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "k33": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle1": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle2": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "angle3": [ DTypeSchema(np.floating), IndexesSchema(), PKG_DIMS_SCHEMA, ], "alternative_cell_averaging": [DTypeSchema(str)], "save_flows": [DTypeSchema(np.bool_)], "starting_head_as_confined_thickness": [DTypeSchema(np.bool_)], "variable_vertical_conductance": [DTypeSchema(np.bool_)], "dewatered": [DTypeSchema(np.bool_)], "perched": [DTypeSchema(np.bool_)], "save_specific_discharge": [DTypeSchema(np.bool_)], } _write_schemata = { "k": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ), "rewet_layer": ( IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ), "k22": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), # No need to check coords: dataset ensures they align with idomain. ), "k33": ( AllValueSchema(">", 0.0), IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)), # No need to check coords: dataset ensures they align with idomain. ), "angle1": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), "angle2": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), "angle3": (IdentityNoDataSchema(other="idomain", is_other_notnull=(">", 0)),), } _grid_data = { "icelltype": np.int32, "k": np.float64, "rewet_layer": np.float64, "k22": np.float64, "k33": np.float64, "angle1": np.float64, "angle2": np.float64, "angle3": np.float64, } _keyword_map = { "rewet": "rewet_record", "rewet_factor": "wetfct", "rewet_method": "ihdwet", "rewet_layer": "wetdry", "variable_vertical_conductance": "variablecv", "starting_head_as_confined_thickness": "thickstrt", "rewet_iterations": "iwetit", "xt3d_option": "xt3doptions", "rhs_option": "rhs", } _template = Package._initialize_template(_pkg_id) _regrid_method = {
"icelltype": (RegridderType.OVERLAP, "mean"),
1
2023-12-08 13:57:59+00:00
16k
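For orientation on this record's completion target: the next_line field is the first entry of the NodePropertyFlow _regrid_method mapping at which the all_code blob breaks off. A hedged sketch of what such a mapping might look like follows; only the icelltype entry is taken from the record, and the other method choices are guesses for illustration, not the package's documented defaults.

# Illustrative sketch: a regrid-method mapping in the shape the recorded next_line implies.
# Only the "icelltype" entry comes from the record; the remaining entries are assumptions.
from imod.mf6.regridding_utils import RegridderType

_regrid_method = {
    "icelltype": (RegridderType.OVERLAP, "mean"),     # the recorded next_line
    "k": (RegridderType.OVERLAP, "geometric_mean"),   # assumed choice for horizontal K
    "k33": (RegridderType.OVERLAP, "harmonic_mean"),  # assumed choice for vertical K
}

# Overriding one of these defaults follows the docstring's own example:
# new_npf = npf.regrid_like(like, {"k": (imod.RegridderType.OVERLAP, "mean")})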
camenduru/MotionDirector-hf
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n # width, height = sensible_buckets(self.width, self.height, h, w)\n width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, 
prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def 
__init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
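The `SingleVideoDataset` snippet above splits a whole video's frame range into fixed-size clips with an `islice`-based `chunk` helper. Below is a minimal standalone sketch of that idiom; only `chunk` itself comes from the snippet, and the frame counts, step, and clip size are made-up values for illustration.

from itertools import islice

def chunk(it, size):
    # Same idiom as SingleVideoDataset.chunk: pull `size` items at a time
    # until islice returns an empty tuple, which stops iter().
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

# Hypothetical numbers: frames sampled with step 3, clips of 4 frames each.
print(list(chunk(range(0, 10, 3), 4)))   # [(0, 3, 6, 9)]
print(list(chunk(range(0, 20, 3), 4)))   # [(0, 3, 6, 9), (12, 15, 18)]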
import argparse
import datetime
import logging
import inspect
import math
import os
import random
import gc
import copy
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import diffusers
import transformers
import imageio
import numpy as np
import itertools
import bitsandbytes as bnb
from typing import Dict, Optional, Tuple
from omegaconf import OmegaConf
from torchvision import transforms
from tqdm.auto import tqdm
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from models.unet_3d_condition import UNet3DConditionModel
from diffusers.models import AutoencoderKL
from diffusers import DDIMScheduler, TextToVideoSDPipeline
from diffusers.optimization import get_scheduler
from diffusers.utils.import_utils import is_xformers_available
from diffusers.models.attention_processor import AttnProcessor2_0, Attention
from diffusers.models.attention import BasicTransformerBlock
from transformers import CLIPTextModel, CLIPTokenizer
from transformers.models.clip.modeling_clip import CLIPEncoder
from utils.dataset import VideoJsonDataset, SingleVideoDataset, \
    ImageDataset, VideoFolderDataset, CachedDataset
from einops import rearrange, repeat
from utils.lora_handler import LoraHandler
from utils.lora import extract_lora_child_module
from utils.ddim_utils import ddim_inversion
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
11,373
already_printed_trainables = False
logger = get_logger(__name__, log_level="INFO")

def create_logging(logging, logger, accelerator):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)

def accelerate_set_verbose(accelerator):
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []

    # Loop through all available datasets, get the name, then add to list of data to process.
already_printed_trainables = False
logger = get_logger(__name__, log_level="INFO")

def create_logging(logging, logger, accelerator):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)

def accelerate_set_verbose(accelerator):
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []

    # Loop through all available datasets, get the name, then add to list of data to process.
for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
1
2023-12-11 04:51:39+00:00
16k
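The record above ends with the `next_line` loop header of `get_train_dataset`, and each dataset class it iterates over exposes a static `__getname__()` ('json', 'single_video', 'image', 'folder'). The body below is only a plausible sketch of how such a loop could match `dataset_types` against those names; it is not the gold continuation stored in the dataset, and it assumes the dataset classes from the record's import statement plus a `train_data` dict of constructor kwargs.

def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []

    # Loop through all available datasets, get the name, then add to list of data to process.
    for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
        for dataset_type in dataset_types:
            # Hypothetical match: instantiate a dataset only when its registered
            # name (DataSet.__getname__()) is one of the requested dataset_types.
            if dataset_type == DataSet.__getname__():
                train_datasets.append(DataSet(**train_data, tokenizer=tokenizer))

    return train_datasets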
ZS-YANG/FemtoDet-v3
projects/Detic_new/detic/detic_roi_head.py
[ { "identifier": "CascadeRoIHead", "path": "mmdet/models/roi_heads/cascade_roi_head.py", "snippet": "class CascadeRoIHead(BaseRoIHead):\n \"\"\"Cascade roi head including one bbox head and one mask head.\n\n https://arxiv.org/abs/1712.00726\n \"\"\"\n\n def __init__(self,\n num_stages: int,\n stage_loss_weights: Union[List[float], Tuple[float]],\n bbox_roi_extractor: OptMultiConfig = None,\n bbox_head: OptMultiConfig = None,\n mask_roi_extractor: OptMultiConfig = None,\n mask_head: OptMultiConfig = None,\n shared_head: OptConfigType = None,\n train_cfg: OptConfigType = None,\n test_cfg: OptConfigType = None,\n init_cfg: OptMultiConfig = None) -> None:\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n assert shared_head is None, \\\n 'Shared head is not supported in Cascade RCNN anymore'\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n super().__init__(\n bbox_roi_extractor=bbox_roi_extractor,\n bbox_head=bbox_head,\n mask_roi_extractor=mask_roi_extractor,\n mask_head=mask_head,\n shared_head=shared_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n init_cfg=init_cfg)\n\n def init_bbox_head(self, bbox_roi_extractor: MultiConfig,\n bbox_head: MultiConfig) -> None:\n \"\"\"Initialize box head and box roi extractor.\n\n Args:\n bbox_roi_extractor (:obj:`ConfigDict`, dict or list):\n Config of box roi extractor.\n bbox_head (:obj:`ConfigDict`, dict or list): Config\n of box in box head.\n \"\"\"\n self.bbox_roi_extractor = ModuleList()\n self.bbox_head = ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(self.num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(self.num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(MODELS.build(roi_extractor))\n self.bbox_head.append(MODELS.build(head))\n\n def init_mask_head(self, mask_roi_extractor: MultiConfig,\n mask_head: MultiConfig) -> None:\n \"\"\"Initialize mask head and mask roi extractor.\n\n Args:\n mask_head (dict): Config of mask in mask head.\n mask_roi_extractor (:obj:`ConfigDict`, dict or list):\n Config of mask roi extractor.\n \"\"\"\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(MODELS.build(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(MODELS.build(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n def init_assigner_sampler(self) -> None:\n \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n TASK_UTILS.build(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n TASK_UTILS.build(\n rcnn_train_cfg.sampler,\n default_args=dict(context=self)))\n\n def _bbox_forward(self, stage: int, x: 
Tuple[Tensor],\n rois: Tensor) -> dict:\n \"\"\"Box head forward function used in both training and testing.\n\n Args:\n stage (int): The current stage in Cascade RoI Head.\n x (tuple[Tensor]): List of multi-level img features.\n rois (Tensor): RoIs with the shape (n, 5) where the first\n column indicates batch id of each RoI.\n\n Returns:\n dict[str, Tensor]: Usually returns a dictionary with keys:\n\n - `cls_score` (Tensor): Classification scores.\n - `bbox_pred` (Tensor): Box energies / deltas.\n - `bbox_feats` (Tensor): Extract bbox RoI features.\n \"\"\"\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def bbox_loss(self, stage: int, x: Tuple[Tensor],\n sampling_results: List[SamplingResult]) -> dict:\n \"\"\"Run forward function and calculate loss for box head in training.\n\n Args:\n stage (int): The current stage in Cascade RoI Head.\n x (tuple[Tensor]): List of multi-level img features.\n sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n Returns:\n dict: Usually returns a dictionary with keys:\n\n - `cls_score` (Tensor): Classification scores.\n - `bbox_pred` (Tensor): Box energies / deltas.\n - `bbox_feats` (Tensor): Extract bbox RoI features.\n - `loss_bbox` (dict): A dictionary of bbox loss components.\n - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n column indicates batch id of each RoI.\n - `bbox_targets` (tuple): Ground truth for proposals in a\n single image. Containing the following list of Tensors:\n (labels, label_weights, bbox_targets, bbox_weights)\n \"\"\"\n bbox_head = self.bbox_head[stage]\n rois = bbox2roi([res.priors for res in sampling_results])\n bbox_results = self._bbox_forward(stage, x, rois)\n bbox_results.update(rois=rois)\n\n bbox_loss_and_target = bbox_head.loss_and_target(\n cls_score=bbox_results['cls_score'],\n bbox_pred=bbox_results['bbox_pred'],\n rois=rois,\n sampling_results=sampling_results,\n rcnn_train_cfg=self.train_cfg[stage])\n bbox_results.update(bbox_loss_and_target)\n\n return bbox_results\n\n def _mask_forward(self, stage: int, x: Tuple[Tensor],\n rois: Tensor) -> dict:\n \"\"\"Mask head forward function used in both training and testing.\n\n Args:\n stage (int): The current stage in Cascade RoI Head.\n x (tuple[Tensor]): Tuple of multi-level img features.\n rois (Tensor): RoIs with the shape (n, 5) where the first\n column indicates batch id of each RoI.\n\n Returns:\n dict: Usually returns a dictionary with keys:\n\n - `mask_preds` (Tensor): Mask prediction.\n \"\"\"\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_preds = mask_head(mask_feats)\n\n mask_results = dict(mask_preds=mask_preds)\n return mask_results\n\n def mask_loss(self, stage: int, x: Tuple[Tensor],\n sampling_results: List[SamplingResult],\n batch_gt_instances: InstanceList) -> dict:\n \"\"\"Run forward function and calculate loss for mask head in training.\n\n Args:\n stage (int): The current stage in Cascade RoI Head.\n x (tuple[Tensor]): Tuple of multi-level img features.\n sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n 
batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes``, ``labels``, and\n ``masks`` attributes.\n\n Returns:\n dict: Usually returns a dictionary with keys:\n\n - `mask_preds` (Tensor): Mask prediction.\n - `loss_mask` (dict): A dictionary of mask loss components.\n \"\"\"\n pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n mask_results = self._mask_forward(stage, x, pos_rois)\n\n mask_head = self.mask_head[stage]\n\n mask_loss_and_target = mask_head.loss_and_target(\n mask_preds=mask_results['mask_preds'],\n sampling_results=sampling_results,\n batch_gt_instances=batch_gt_instances,\n rcnn_train_cfg=self.train_cfg[stage])\n mask_results.update(mask_loss_and_target)\n\n return mask_results\n\n def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n batch_data_samples: SampleList) -> dict:\n \"\"\"Perform forward propagation and loss calculation of the detection\n roi on the features of the upstream network.\n\n Args:\n x (tuple[Tensor]): List of multi-level img features.\n rpn_results_list (list[:obj:`InstanceData`]): List of region\n proposals.\n batch_data_samples (list[:obj:`DetDataSample`]): The batch\n data samples. It usually includes information such\n as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components\n \"\"\"\n # TODO: May add a new function in baseroihead\n assert len(rpn_results_list) == len(batch_data_samples)\n outputs = unpack_gt_instances(batch_data_samples)\n batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n = outputs\n\n num_imgs = len(batch_data_samples)\n losses = dict()\n results_list = rpn_results_list\n for stage in range(self.num_stages):\n self.current_stage = stage\n\n stage_loss_weight = self.stage_loss_weights[stage]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = self.bbox_assigner[stage]\n bbox_sampler = self.bbox_sampler[stage]\n\n for i in range(num_imgs):\n results = results_list[i]\n # rename rpn_results.bboxes to rpn_results.priors\n results.priors = results.pop('bboxes')\n\n assign_result = bbox_assigner.assign(\n results, batch_gt_instances[i],\n batch_gt_instances_ignore[i])\n\n sampling_result = bbox_sampler.sample(\n assign_result,\n results,\n batch_gt_instances[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_results = self.bbox_loss(stage, x, sampling_results)\n\n for name, value in bbox_results['loss_bbox'].items():\n losses[f's{stage}.{name}'] = (\n value * stage_loss_weight if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self.mask_loss(stage, x, sampling_results,\n batch_gt_instances)\n for name, value in mask_results['loss_mask'].items():\n losses[f's{stage}.{name}'] = (\n value * stage_loss_weight if 'loss' in name else value)\n\n # refine bboxes\n if stage < self.num_stages - 1:\n bbox_head = self.bbox_head[stage]\n with torch.no_grad():\n results_list = bbox_head.refine_bboxes(\n sampling_results, bbox_results, batch_img_metas)\n # Empty proposal\n if results_list is None:\n break\n return losses\n\n def predict_bbox(self,\n x: Tuple[Tensor],\n batch_img_metas: List[dict],\n rpn_results_list: InstanceList,\n rcnn_test_cfg: ConfigType,\n rescale: bool = False,\n **kwargs) -> InstanceList:\n \"\"\"Perform forward propagation of the bbox head and predict detection\n results on 
the features of the upstream network.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n batch_img_metas (list[dict]): List of image information.\n rpn_results_list (list[:obj:`InstanceData`]): List of region\n proposals.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Detection results of each image\n after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n \"\"\"\n proposals = [res.bboxes for res in rpn_results_list]\n num_proposals_per_img = tuple(len(p) for p in proposals)\n rois = bbox2roi(proposals)\n\n if rois.shape[0] == 0:\n return empty_instances(\n batch_img_metas,\n rois.device,\n task_type='bbox',\n box_type=self.bbox_head[-1].predict_box_type,\n num_classes=self.bbox_head[-1].num_classes,\n score_per_cls=rcnn_test_cfg is None)\n\n rois, cls_scores, bbox_preds = self._refine_roi(\n x=x,\n rois=rois,\n batch_img_metas=batch_img_metas,\n num_proposals_per_img=num_proposals_per_img,\n **kwargs)\n\n results_list = self.bbox_head[-1].predict_by_feat(\n rois=rois,\n cls_scores=cls_scores,\n bbox_preds=bbox_preds,\n batch_img_metas=batch_img_metas,\n rescale=rescale,\n rcnn_test_cfg=rcnn_test_cfg)\n return results_list\n\n def predict_mask(self,\n x: Tuple[Tensor],\n batch_img_metas: List[dict],\n results_list: List[InstanceData],\n rescale: bool = False) -> List[InstanceData]:\n \"\"\"Perform forward propagation of the mask head and predict detection\n results on the features of the upstream network.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n batch_img_metas (list[dict]): List of image information.\n results_list (list[:obj:`InstanceData`]): Detection results of\n each image.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Detection results of each image\n after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n - masks (Tensor): Has a shape (num_instances, H, W).\n \"\"\"\n bboxes = [res.bboxes for res in results_list]\n mask_rois = bbox2roi(bboxes)\n if mask_rois.shape[0] == 0:\n results_list = empty_instances(\n batch_img_metas,\n mask_rois.device,\n task_type='mask',\n instance_results=results_list,\n mask_thr_binary=self.test_cfg.mask_thr_binary)\n return results_list\n\n num_mask_rois_per_img = [len(res) for res in results_list]\n aug_masks = []\n for stage in range(self.num_stages):\n mask_results = self._mask_forward(stage, x, mask_rois)\n mask_preds = mask_results['mask_preds']\n # split batch mask prediction back to each image\n mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n aug_masks.append([m.sigmoid().detach() for m in mask_preds])\n\n merged_masks = []\n for i in range(len(batch_img_metas)):\n aug_mask = [mask[i] for mask in aug_masks]\n merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n merged_masks.append(merged_mask)\n results_list = 
self.mask_head[-1].predict_by_feat(\n mask_preds=merged_masks,\n results_list=results_list,\n batch_img_metas=batch_img_metas,\n rcnn_test_cfg=self.test_cfg,\n rescale=rescale,\n activate_map=True)\n return results_list\n\n def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,\n batch_img_metas: List[dict],\n num_proposals_per_img: Sequence[int], **kwargs) -> tuple:\n \"\"\"Multi-stage refinement of RoI.\n\n Args:\n x (tuple[Tensor]): List of multi-level img features.\n rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]\n batch_img_metas (list[dict]): List of image information.\n num_proposals_per_img (sequence[int]): number of proposals\n in each image.\n\n Returns:\n tuple:\n\n - rois (Tensor): Refined RoI.\n - cls_scores (list[Tensor]): Average predicted\n cls score per image.\n - bbox_preds (list[Tensor]): Bbox branch predictions\n for the last stage of per image.\n \"\"\"\n # \"ms\" in variable names means multi-stage\n ms_scores = []\n for stage in range(self.num_stages):\n bbox_results = self._bbox_forward(\n stage=stage, x=x, rois=rois, **kwargs)\n\n # split batch bbox prediction back to each image\n cls_scores = bbox_results['cls_score']\n bbox_preds = bbox_results['bbox_pred']\n\n rois = rois.split(num_proposals_per_img, 0)\n cls_scores = cls_scores.split(num_proposals_per_img, 0)\n ms_scores.append(cls_scores)\n\n # some detector with_reg is False, bbox_preds will be None\n if bbox_preds is not None:\n # TODO move this to a sabl_roi_head\n # the bbox prediction of some detectors like SABL is not Tensor\n if isinstance(bbox_preds, torch.Tensor):\n bbox_preds = bbox_preds.split(num_proposals_per_img, 0)\n else:\n bbox_preds = self.bbox_head[stage].bbox_pred_split(\n bbox_preds, num_proposals_per_img)\n else:\n bbox_preds = (None, ) * len(batch_img_metas)\n\n if stage < self.num_stages - 1:\n bbox_head = self.bbox_head[stage]\n if bbox_head.custom_activation:\n cls_scores = [\n bbox_head.loss_cls.get_activation(s)\n for s in cls_scores\n ]\n refine_rois_list = []\n for i in range(len(batch_img_metas)):\n if rois[i].shape[0] > 0:\n bbox_label = cls_scores[i][:, :-1].argmax(dim=1)\n # Refactor `bbox_head.regress_by_class` to only accept\n # box tensor without img_idx concatenated.\n refined_bboxes = bbox_head.regress_by_class(\n rois[i][:, 1:], bbox_label, bbox_preds[i],\n batch_img_metas[i])\n refined_bboxes = get_box_tensor(refined_bboxes)\n refined_rois = torch.cat(\n [rois[i][:, [0]], refined_bboxes], dim=1)\n refine_rois_list.append(refined_rois)\n rois = torch.cat(refine_rois_list)\n\n # average scores of each image by stages\n cls_scores = [\n sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n for i in range(len(batch_img_metas))\n ]\n return rois, cls_scores, bbox_preds\n\n def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n batch_data_samples: SampleList) -> tuple:\n \"\"\"Network forward process. 
Usually includes backbone, neck and head\n forward without any post-processing.\n\n Args:\n x (List[Tensor]): Multi-level features that may have different\n resolutions.\n rpn_results_list (list[:obj:`InstanceData`]): List of region\n proposals.\n batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n the meta information of each image and corresponding\n annotations.\n\n Returns\n tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n forward.\n \"\"\"\n results = ()\n batch_img_metas = [\n data_samples.metainfo for data_samples in batch_data_samples\n ]\n proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n num_proposals_per_img = tuple(len(p) for p in proposals)\n rois = bbox2roi(proposals)\n # bbox head\n if self.with_bbox:\n rois, cls_scores, bbox_preds = self._refine_roi(\n x, rois, batch_img_metas, num_proposals_per_img)\n results = results + (cls_scores, bbox_preds)\n # mask head\n if self.with_mask:\n aug_masks = []\n rois = torch.cat(rois)\n for stage in range(self.num_stages):\n mask_results = self._mask_forward(stage, x, rois)\n mask_preds = mask_results['mask_preds']\n mask_preds = mask_preds.split(num_proposals_per_img, 0)\n aug_masks.append([m.sigmoid().detach() for m in mask_preds])\n\n merged_masks = []\n for i in range(len(batch_img_metas)):\n aug_mask = [mask[i] for mask in aug_masks]\n merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n merged_masks.append(merged_mask)\n results = results + (merged_masks, )\n return results" }, { "identifier": "SamplingResult", "path": "mmdet/models/task_modules/samplers/sampling_result.py", "snippet": "class SamplingResult(util_mixins.NiceRepr):\n \"\"\"Bbox sampling result.\n\n Args:\n pos_inds (Tensor): Indices of positive samples.\n neg_inds (Tensor): Indices of negative samples.\n priors (Tensor): The priors can be anchors or points,\n or the bboxes predicted by the previous stage.\n gt_bboxes (Tensor): Ground truth of bboxes.\n assign_result (:obj:`AssignResult`): Assigning results.\n gt_flags (Tensor): The Ground truth flags.\n avg_factor_with_neg (bool): If True, ``avg_factor`` equal to\n the number of total priors; Otherwise, it is the number of\n positive priors. 
Defaults to True.\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random(rng=10)\n >>> print(f'self = {self}')\n self = <SamplingResult({\n 'neg_inds': tensor([1, 2, 3, 5, 6, 7, 8,\n 9, 10, 11, 12, 13]),\n 'neg_priors': torch.Size([12, 4]),\n 'num_gts': 1,\n 'num_neg': 12,\n 'num_pos': 1,\n 'avg_factor': 13,\n 'pos_assigned_gt_inds': tensor([0]),\n 'pos_inds': tensor([0]),\n 'pos_is_gt': tensor([1], dtype=torch.uint8),\n 'pos_priors': torch.Size([1, 4])\n })>\n \"\"\"\n\n def __init__(self,\n pos_inds: Tensor,\n neg_inds: Tensor,\n priors: Tensor,\n gt_bboxes: Tensor,\n assign_result: AssignResult,\n gt_flags: Tensor,\n avg_factor_with_neg: bool = True) -> None:\n self.pos_inds = pos_inds\n self.neg_inds = neg_inds\n self.num_pos = max(pos_inds.numel(), 1)\n self.num_neg = max(neg_inds.numel(), 1)\n self.avg_factor_with_neg = avg_factor_with_neg\n self.avg_factor = self.num_pos + self.num_neg \\\n if avg_factor_with_neg else self.num_pos\n self.pos_priors = priors[pos_inds]\n self.neg_priors = priors[neg_inds]\n self.pos_is_gt = gt_flags[pos_inds]\n\n self.num_gts = gt_bboxes.shape[0]\n self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n self.pos_gt_labels = assign_result.labels[pos_inds]\n box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4\n if gt_bboxes.numel() == 0:\n # hack for index error case\n assert self.pos_assigned_gt_inds.numel() == 0\n self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim)\n else:\n if len(gt_bboxes.shape) < 2:\n gt_bboxes = gt_bboxes.view(-1, box_dim)\n self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()]\n\n @property\n def priors(self):\n \"\"\"torch.Tensor: concatenated positive and negative priors\"\"\"\n return cat_boxes([self.pos_priors, self.neg_priors])\n\n @property\n def bboxes(self):\n \"\"\"torch.Tensor: concatenated positive and negative boxes\"\"\"\n warnings.warn('DeprecationWarning: bboxes is deprecated, '\n 'please use \"priors\" instead')\n return self.priors\n\n @property\n def pos_bboxes(self):\n warnings.warn('DeprecationWarning: pos_bboxes is deprecated, '\n 'please use \"pos_priors\" instead')\n return self.pos_priors\n\n @property\n def neg_bboxes(self):\n warnings.warn('DeprecationWarning: neg_bboxes is deprecated, '\n 'please use \"neg_priors\" instead')\n return self.neg_priors\n\n def to(self, device):\n \"\"\"Change the device of the data inplace.\n\n Example:\n >>> self = SamplingResult.random()\n >>> print(f'self = {self.to(None)}')\n >>> # xdoctest: +REQUIRES(--gpu)\n >>> print(f'self = {self.to(0)}')\n \"\"\"\n _dict = self.__dict__\n for key, value in _dict.items():\n if isinstance(value, (torch.Tensor, BaseBoxes)):\n _dict[key] = value.to(device)\n return self\n\n def __nice__(self):\n data = self.info.copy()\n data['pos_priors'] = data.pop('pos_priors').shape\n data['neg_priors'] = data.pop('neg_priors').shape\n parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n body = ' ' + ',\\n '.join(parts)\n return '{\\n' + body + '\\n}'\n\n @property\n def info(self):\n \"\"\"Returns a dictionary of info about the object.\"\"\"\n return {\n 'pos_inds': self.pos_inds,\n 'neg_inds': self.neg_inds,\n 'pos_priors': self.pos_priors,\n 'neg_priors': self.neg_priors,\n 'pos_is_gt': self.pos_is_gt,\n 'num_gts': self.num_gts,\n 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n 'num_pos': self.num_pos,\n 'num_neg': self.num_neg,\n 'avg_factor': self.avg_factor\n }\n\n @classmethod\n def 
random(cls, rng=None, **kwargs):\n \"\"\"\n Args:\n rng (None | int | numpy.random.RandomState): seed or state.\n kwargs (keyword arguments):\n - num_preds: Number of predicted boxes.\n - num_gts: Number of true boxes.\n - p_ignore (float): Probability of a predicted box assigned to\n an ignored truth.\n - p_assigned (float): probability of a predicted box not being\n assigned.\n\n Returns:\n :obj:`SamplingResult`: Randomly generated sampling result.\n\n Example:\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random()\n >>> print(self.__dict__)\n \"\"\"\n from mmengine.structures import InstanceData\n\n from mmdet.models.task_modules.assigners import AssignResult\n from mmdet.models.task_modules.samplers import RandomSampler\n rng = ensure_rng(rng)\n\n # make probabilistic?\n num = 32\n pos_fraction = 0.5\n neg_pos_ub = -1\n\n assign_result = AssignResult.random(rng=rng, **kwargs)\n\n # Note we could just compute an assignment\n priors = random_boxes(assign_result.num_preds, rng=rng)\n gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)\n gt_labels = torch.randint(\n 0, 5, (assign_result.num_gts, ), dtype=torch.long)\n\n pred_instances = InstanceData()\n pred_instances.priors = priors\n\n gt_instances = InstanceData()\n gt_instances.bboxes = gt_bboxes\n gt_instances.labels = gt_labels\n\n add_gt_as_proposals = True\n\n sampler = RandomSampler(\n num,\n pos_fraction,\n neg_pos_ub=neg_pos_ub,\n add_gt_as_proposals=add_gt_as_proposals,\n rng=rng)\n self = sampler.sample(\n assign_result=assign_result,\n pred_instances=pred_instances,\n gt_instances=gt_instances)\n return self" }, { "identifier": "merge_aug_masks", "path": "mmdet/models/test_time_augs/merge_augs.py", "snippet": "def merge_aug_masks(aug_masks: List[Tensor],\n img_metas: dict,\n weights: Optional[Union[list, Tensor]] = None) -> Tensor:\n \"\"\"Merge augmented mask prediction.\n\n Args:\n aug_masks (list[Tensor]): each has shape\n (n, c, h, w).\n img_metas (dict): Image information.\n weights (list or Tensor): Weight of each aug_masks,\n the length should be n.\n\n Returns:\n Tensor: has shape (n, c, h, w)\n \"\"\"\n recovered_masks = []\n for i, mask in enumerate(aug_masks):\n if weights is not None:\n assert len(weights) == len(aug_masks)\n weight = weights[i]\n else:\n weight = 1\n flip = img_metas.get('flip', False)\n if flip:\n flip_direction = img_metas['flip_direction']\n if flip_direction == 'horizontal':\n mask = mask[:, :, :, ::-1]\n elif flip_direction == 'vertical':\n mask = mask[:, :, ::-1, :]\n elif flip_direction == 'diagonal':\n mask = mask[:, :, :, ::-1]\n mask = mask[:, :, ::-1, :]\n else:\n raise ValueError(\n f\"Invalid flipping direction '{flip_direction}'\")\n recovered_masks.append(mask[None, :] * weight)\n\n merged_masks = torch.cat(recovered_masks, 0).mean(dim=0)\n if weights is not None:\n merged_masks = merged_masks * len(weights) / sum(weights)\n return merged_masks" }, { "identifier": "empty_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def empty_instances(batch_img_metas: List[dict],\n device: torch.device,\n task_type: str,\n instance_results: OptInstanceList = None,\n mask_thr_binary: Union[int, float] = 0,\n box_type: Union[str, type] = 'hbox',\n use_box_type: bool = False,\n num_classes: int = 80,\n score_per_cls: bool = False) -> List[InstanceData]:\n \"\"\"Handle predicted instances when RoI is empty.\n\n Note: If ``instance_results`` is not None, it will be modified\n in place internally, and then return 
``instance_results``\n\n Args:\n batch_img_metas (list[dict]): List of image information.\n device (torch.device): Device of tensor.\n task_type (str): Expected returned task type. it currently\n supports bbox and mask.\n instance_results (list[:obj:`InstanceData`]): List of instance\n results.\n mask_thr_binary (int, float): mask binarization threshold.\n Defaults to 0.\n box_type (str or type): The empty box type. Defaults to `hbox`.\n use_box_type (bool): Whether to warp boxes with the box type.\n Defaults to False.\n num_classes (int): num_classes of bbox_head. Defaults to 80.\n score_per_cls (bool): Whether to generate classwise score for\n the empty instance. ``score_per_cls`` will be True when the model\n needs to produce raw results without nms. Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Detection results of each image\n \"\"\"\n assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \\\n f' but got {task_type}'\n\n if instance_results is not None:\n assert len(instance_results) == len(batch_img_metas)\n\n results_list = []\n for img_id in range(len(batch_img_metas)):\n if instance_results is not None:\n results = instance_results[img_id]\n assert isinstance(results, InstanceData)\n else:\n results = InstanceData()\n\n if task_type == 'bbox':\n _, box_type = get_box_type(box_type)\n bboxes = torch.zeros(0, box_type.box_dim, device=device)\n if use_box_type:\n bboxes = box_type(bboxes, clone=False)\n results.bboxes = bboxes\n score_shape = (0, num_classes + 1) if score_per_cls else (0, )\n results.scores = torch.zeros(score_shape, device=device)\n results.labels = torch.zeros((0, ),\n device=device,\n dtype=torch.long)\n else:\n # TODO: Handle the case where rescale is false\n img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2]\n # the type of `im_mask` will be torch.bool or torch.uint8,\n # where uint8 if for visualization and debugging.\n im_mask = torch.zeros(\n 0,\n img_h,\n img_w,\n device=device,\n dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8)\n results.masks = im_mask\n results_list.append(results)\n return results_list" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "SampleList", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):" }, { "identifier": "bbox2roi", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor:\n \"\"\"Convert a list of bboxes to roi format.\n\n Args:\n bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes\n corresponding to a batch of images.\n\n Returns:\n Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the\n different box types. For example, If the box type in ``bbox_list``\n is HorizontalBoxes, the output shape is (n, 5). Each row of data\n indicates [batch_ind, x1, y1, x2, y2].\n \"\"\"\n rois_list = []\n for img_id, bboxes in enumerate(bbox_list):\n bboxes = get_box_tensor(bboxes)\n img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)\n rois = torch.cat([img_inds, bboxes], dim=-1)\n rois_list.append(rois)\n rois = torch.cat(rois_list, 0)\n return rois" }, { "identifier": "get_box_tensor", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor:\n \"\"\"Get tensor data from box type boxes.\n\n Args:\n boxes (Tensor or BaseBoxes): boxes with type of tensor or box type.\n If its type is a tensor, the boxes will be directly returned.\n If its type is a box type, the `boxes.tensor` will be returned.\n\n Returns:\n Tensor: boxes tensor.\n \"\"\"\n if isinstance(boxes, BaseBoxes):\n boxes = boxes.tensor\n return boxes" }, { "identifier": "ConfigType", "path": "mmdet/utils/typing_utils.py", "snippet": "" } ]
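Several of the snippets above (`bbox2roi`, `_refine_roi`, `predict_bbox`) revolve around the (n, 5) RoI layout in which column 0 is the image index within the batch. A small worked example of `bbox2roi`, using made-up boxes for a two-image batch:

import torch
from mmdet.structures.bbox import bbox2roi

# Made-up proposals: image 0 has two boxes, image 1 has one.
boxes_img0 = torch.tensor([[10., 10., 50., 50.],
                           [20., 30., 60., 80.]])
boxes_img1 = torch.tensor([[ 5., 15., 25., 40.]])

rois = bbox2roi([boxes_img0, boxes_img1])
print(rois.shape)  # torch.Size([3, 5])
print(rois)
# tensor([[ 0., 10., 10., 50., 50.],
#         [ 0., 20., 30., 60., 80.],
#         [ 1.,  5., 15., 25., 40.]])
# Column 0 is the batch index, matching the "[batch_ind, x1, y1, x2, y2]"
# layout described in the bbox2roi docstring.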
from typing import List, Sequence, Tuple
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.roi_heads import CascadeRoIHead
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.models.utils import empty_instances, unpack_gt_instances
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, get_box_tensor
from mmdet.utils import ConfigType, InstanceList, MultiConfig
import torch
10,806
# Copyright (c) OpenMMLab. All rights reserved.


@MODELS.register_module()
class DeticRoIHead(CascadeRoIHead):

    def __init__(
        self,
        *,
        mult_proposal_score: bool = False,
        with_image_labels: bool = False,
        add_image_box: bool = False,
        image_box_size: float = 1.0,
        ws_num_props: int = 128,
        add_feature_to_prop: bool = False,
        mask_weight: float = 1.0,
        one_class_per_proposal: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.mult_proposal_score = mult_proposal_score
        self.with_image_labels = with_image_labels
        self.add_image_box = add_image_box
        self.image_box_size = image_box_size
        self.ws_num_props = ws_num_props
        self.add_feature_to_prop = add_feature_to_prop
        self.mask_weight = mask_weight
        self.one_class_per_proposal = one_class_per_proposal

    def init_mask_head(self, mask_roi_extractor: MultiConfig,
                       mask_head: MultiConfig) -> None:
        """Initialize mask head and mask roi extractor.

        Args:
            mask_head (dict): Config of mask in mask head.
            mask_roi_extractor (:obj:`ConfigDict`, dict or list):
                Config of mask roi extractor.
        """
        self.mask_head = MODELS.build(mask_head)

        if mask_roi_extractor is not None:
            self.share_roi_extractor = False
            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
        else:
            self.share_roi_extractor = True
            self.mask_roi_extractor = self.bbox_roi_extractor

    def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,
                    batch_img_metas: List[dict],
                    num_proposals_per_img: Sequence[int], **kwargs) -> tuple:
        """Multi-stage refinement of RoI.

        Args:
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]
            batch_img_metas (list[dict]): List of image information.
            num_proposals_per_img (sequence[int]): number of proposals
                in each image.

        Returns:
            tuple:

            - rois (Tensor): Refined RoI.
            - cls_scores (list[Tensor]): Average predicted
                cls score per image.
            - bbox_preds (list[Tensor]): Bbox branch predictions
                for the last stage of per image.
        """
        # "ms" in variable names means multi-stage
        ms_scores = []
        for stage in range(self.num_stages):
            bbox_results = self._bbox_forward(
                stage=stage, x=x, rois=rois, **kwargs)

            # split batch bbox prediction back to each image
            cls_scores = bbox_results['cls_score'].sigmoid()
            bbox_preds = bbox_results['bbox_pred']

            rois = rois.split(num_proposals_per_img, 0)
            cls_scores = cls_scores.split(num_proposals_per_img, 0)
            ms_scores.append(cls_scores)
            bbox_preds = bbox_preds.split(num_proposals_per_img, 0)

            if stage < self.num_stages - 1:
                bbox_head = self.bbox_head[stage]
                refine_rois_list = []
                for i in range(len(batch_img_metas)):
                    if rois[i].shape[0] > 0:
                        bbox_label = cls_scores[i][:, :-1].argmax(dim=1)
                        # Refactor `bbox_head.regress_by_class` to only accept
                        # box tensor without img_idx concatenated.
                        refined_bboxes = bbox_head.regress_by_class(
                            rois[i][:, 1:], bbox_label, bbox_preds[i],
                            batch_img_metas[i])
                        refined_bboxes = get_box_tensor(refined_bboxes)
                        refined_rois = torch.cat(
                            [rois[i][:, [0]], refined_bboxes], dim=1)
                        refine_rois_list.append(refined_rois)
                rois = torch.cat(refine_rois_list)
        # ms_scores aligned
        # average scores of each image by stages
        cls_scores = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(len(batch_img_metas))
        ]  # aligned
        return rois, cls_scores, bbox_preds

    def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict],
# Copyright (c) OpenMMLab. All rights reserved.


@MODELS.register_module()
class DeticRoIHead(CascadeRoIHead):

    def __init__(
        self,
        *,
        mult_proposal_score: bool = False,
        with_image_labels: bool = False,
        add_image_box: bool = False,
        image_box_size: float = 1.0,
        ws_num_props: int = 128,
        add_feature_to_prop: bool = False,
        mask_weight: float = 1.0,
        one_class_per_proposal: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.mult_proposal_score = mult_proposal_score
        self.with_image_labels = with_image_labels
        self.add_image_box = add_image_box
        self.image_box_size = image_box_size
        self.ws_num_props = ws_num_props
        self.add_feature_to_prop = add_feature_to_prop
        self.mask_weight = mask_weight
        self.one_class_per_proposal = one_class_per_proposal

    def init_mask_head(self, mask_roi_extractor: MultiConfig,
                       mask_head: MultiConfig) -> None:
        """Initialize mask head and mask roi extractor.

        Args:
            mask_head (dict): Config of mask in mask head.
            mask_roi_extractor (:obj:`ConfigDict`, dict or list):
                Config of mask roi extractor.
        """
        self.mask_head = MODELS.build(mask_head)

        if mask_roi_extractor is not None:
            self.share_roi_extractor = False
            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
        else:
            self.share_roi_extractor = True
            self.mask_roi_extractor = self.bbox_roi_extractor

    def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,
                    batch_img_metas: List[dict],
                    num_proposals_per_img: Sequence[int], **kwargs) -> tuple:
        """Multi-stage refinement of RoI.

        Args:
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]
            batch_img_metas (list[dict]): List of image information.
            num_proposals_per_img (sequence[int]): number of proposals
                in each image.

        Returns:
            tuple:

            - rois (Tensor): Refined RoI.
            - cls_scores (list[Tensor]): Average predicted
                cls score per image.
            - bbox_preds (list[Tensor]): Bbox branch predictions
                for the last stage of per image.
        """
        # "ms" in variable names means multi-stage
        ms_scores = []
        for stage in range(self.num_stages):
            bbox_results = self._bbox_forward(
                stage=stage, x=x, rois=rois, **kwargs)

            # split batch bbox prediction back to each image
            cls_scores = bbox_results['cls_score'].sigmoid()
            bbox_preds = bbox_results['bbox_pred']

            rois = rois.split(num_proposals_per_img, 0)
            cls_scores = cls_scores.split(num_proposals_per_img, 0)
            ms_scores.append(cls_scores)
            bbox_preds = bbox_preds.split(num_proposals_per_img, 0)

            if stage < self.num_stages - 1:
                bbox_head = self.bbox_head[stage]
                refine_rois_list = []
                for i in range(len(batch_img_metas)):
                    if rois[i].shape[0] > 0:
                        bbox_label = cls_scores[i][:, :-1].argmax(dim=1)
                        # Refactor `bbox_head.regress_by_class` to only accept
                        # box tensor without img_idx concatenated.
                        refined_bboxes = bbox_head.regress_by_class(
                            rois[i][:, 1:], bbox_label, bbox_preds[i],
                            batch_img_metas[i])
                        refined_bboxes = get_box_tensor(refined_bboxes)
                        refined_rois = torch.cat(
                            [rois[i][:, [0]], refined_bboxes], dim=1)
                        refine_rois_list.append(refined_rois)
                rois = torch.cat(refine_rois_list)
        # ms_scores aligned
        # average scores of each image by stages
        cls_scores = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(len(batch_img_metas))
        ]  # aligned
        return rois, cls_scores, bbox_preds

    def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict],
rpn_results_list: InstanceList,
9
2023-12-11 15:23:03+00:00
16k
merlresearch/PixPNet
pixpnet/protonets/lit_model.py
[ { "identifier": "get_metadata", "path": "pixpnet/data.py", "snippet": "def get_metadata(config):\n dataset = config.dataset.name.upper().replace(\"-\", \"\")\n metadata = DatasetMeta(\n output_size=DATA_NUM_OUTPUTS[dataset],\n input_channels=DATA_CHANNELS[dataset],\n input_size=_get_input_size(dataset),\n label_names=LABEL_NAMES.get(dataset),\n )\n return metadata" }, { "identifier": "LitData", "path": "pixpnet/lightning/lightning_data.py", "snippet": "class LitData(LightningDataModule):\n def __init__(self, config, num_workers=None, **kwargs):\n super().__init__()\n self.config = config\n self.train = self.train_no_aug = self.val = self.test = None\n self.kwargs = kwargs\n # Required to check if setup was called prior...\n # https://github.com/Lightning-AI/lightning/issues/9865\n self.datasets_loaded = False\n if num_workers is None:\n num_workers = num_cpus()\n self.num_workers = num_workers\n\n def setup(self, stage=None):\n \"\"\"called on every GPU\"\"\"\n if self.datasets_loaded:\n return\n\n logger.info(f\"Loading the {self.config.dataset.name} dataset \" f\"(val_size={self.config.dataset.val_size})\")\n\n datasets = get_datasets(self.config, **self.kwargs)\n\n if self.config.dataset.needs_unaugmented:\n self.train, self.train_no_aug, self.val, self.test = datasets\n else:\n self.train, self.val, self.test = datasets\n\n # get_datasets may modify val_size\n if self.config.dataset.val_size == 0:\n if self.trainer:\n self.trainer.limit_val_batches = 0\n self.trainer.num_sanity_val_steps = 0\n\n self.datasets_loaded = True\n\n def train_dataloader(self):\n return DataLoader(\n self.train,\n batch_size=self.config.train.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n drop_last=True,\n )\n\n def train_no_aug_dataloader(self):\n if not self.config.dataset.needs_unaugmented:\n raise ValueError(\"Unaugmented train data set requested, but \" \"--dataset.needs-unaugmented is False\")\n return DataLoader(\n self.train_no_aug,\n batch_size=self.config.train.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=False,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val, batch_size=self.config.test.batch_size, num_workers=self.num_workers, drop_last=False\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.test, batch_size=self.config.test.batch_size, num_workers=self.num_workers, drop_last=False\n )" }, { "identifier": "BaseLitModel", "path": "pixpnet/lightning/lit_module.py", "snippet": "class BaseLitModel(LightningModule, metaclass=ABCMeta):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.optimizer = None\n\n # training stats\n self._train_time_total = 0\n self._train_time_per_epoch = 0\n self._actual_epoch_count = 0\n self._infer_count = 0\n self._infer_batch_count = 0\n self._inference_time_per_sample = 0\n self._inference_time_per_batch = 0\n self._train_t0 = None\n self._inference_t0 = None\n\n @property\n def train_time_total(self):\n return self._train_time_total\n\n @property\n def train_time_per_epoch(self):\n return self._train_time_per_epoch\n\n @property\n def inference_time_per_sample(self):\n return self._inference_time_per_sample\n\n @property\n def inference_time_per_batch(self):\n return self._inference_time_per_batch\n\n @abstractmethod\n def _forward(self, *args, **kwargs) -> Any:\n raise NotImplementedError\n\n @staticmethod\n def _metric_per_split(metric, *args, **kwargs):\n return CollisionlessModuleDict(\n {\"train\": metric(*args, **kwargs), \"val\": metric(*args, **kwargs), 
\"test\": metric(*args, **kwargs)}\n )\n\n def forward(self, x, *args, **kwargs) -> Any:\n if not self.training:\n # only record inference time in non-training mode\n self._inference_t0 = time.time()\n out = self._forward(x, *args, **kwargs)\n if not self.training:\n duration = time.time() - self._inference_t0\n self._inference_time_per_batch = (self._inference_time_per_batch * self._infer_batch_count + duration) / (\n self._infer_batch_count + 1\n )\n self._infer_batch_count += 1\n self._inference_time_per_sample = (self._inference_time_per_sample * self._infer_count + duration) / (\n self._infer_count + len(x)\n )\n self._infer_count += len(x)\n return out\n\n def on_train_start(self):\n if self.config.debug:\n torch.autograd.set_detect_anomaly(True)\n hp_lr_metrics = {f\"hp/lr_group_{i}\": 0 for i in range(len(self.optimizer.param_groups))}\n for lit_logger in self.loggers:\n args = (hp_lr_metrics,) if isinstance(lit_logger, TensorBoardLogger) else ()\n lit_logger.log_hyperparams(self.config.optimizer, *args)\n lit_logger.log_hyperparams(self.config.train)\n lit_logger.log_hyperparams(self.config.model)\n\n def on_train_epoch_start(self) -> None:\n self._train_t0 = time.time()\n\n def on_train_epoch_end(self) -> None:\n duration = time.time() - self._train_t0\n self._train_time_total += duration\n # running mean\n self._train_time_per_epoch = (self._train_time_per_epoch * self._actual_epoch_count + duration) / (\n self._actual_epoch_count + 1\n )\n self._actual_epoch_count += 1\n\n def training_step(self, batch, batch_idx, dataset_idx=None):\n loss = self._shared_eval(batch, batch_idx, dataset_idx, \"train\")\n for i, param_group in enumerate(self.optimizer.param_groups):\n self.log(f\"hp/lr_group_{i}\", param_group[\"lr\"])\n return loss\n\n def validation_step(self, batch, batch_idx, dataset_idx=None):\n self._shared_eval(batch, batch_idx, dataset_idx, \"val\")\n\n def test_step(self, batch, batch_idx, dataset_idx=None):\n self._shared_eval(batch, batch_idx, dataset_idx, \"test\")\n\n @abstractmethod\n def _shared_eval(self, batch: Any, batch_idx: int, dataset_idx: int, prefix: str) -> torch.Tensor:\n \"\"\"\n Handle batch, compute forward, compute loss and other metrics,\n then return the loss.\n \"\"\"\n raise NotImplementedError" }, { "identifier": "get_optimizer_cls", "path": "pixpnet/optim.py", "snippet": "def get_optimizer_cls(\n config: argparse.Namespace,\n ignore: Optional[Set[str]] = None,\n) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:\n if ignore is None:\n ignore = set()\n try:\n optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]\n except KeyError:\n raise ValueError(f'No such optimizer \"{config.optimizer.name}\"')\n hparams, invalid_keys = intersect_func_and_kwargs(\n optimizer_cls,\n config.optimizer,\n exclude_func_args={\"params\"},\n exclude_kwargs={\"name\", \"throttle_lr\", \"lr_schedule\", \"lr_scheduler\", \"lr_factor\", \"warmup_period\"} | ignore,\n )\n if invalid_keys:\n logger.warning(\n f\"Will not pass the following invalid optimizer \"\n f\"hyperparameters to {optimizer_cls.__name__}: \"\n f'{\", \".join(invalid_keys)}'\n )\n logger.info(f\"Optimizer hyperparameters for {optimizer_cls.__name__}: \" f\"{hparams}\")\n return optimizer_cls, hparams" }, { "identifier": "get_scheduler", "path": "pixpnet/optim.py", "snippet": "def get_scheduler(optimizer: torch.optim.Optimizer, config: argparse.Namespace) -> LRWithWarmupMixin:\n \"\"\"\"\"\"\n if config.optimizer.warmup_period:\n lr_warmup = ExponentialWarmup(optimizer, 
warmup_period=config.optimizer.warmup_period)\n else:\n lr_warmup = None\n if config.optimizer.lr_scheduler == \"multistep\":\n lr_scheduler = MultiStepLRWithWarmup(\n optimizer,\n milestones=config.optimizer.lr_schedule,\n gamma=config.optimizer.lr_factor,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n elif config.optimizer.lr_scheduler == \"step\":\n assert len(config.optimizer.lr_schedule) == 1, config.optimizer.lr_schedule\n lr_scheduler = StepLRWithWarmup(\n optimizer,\n step_size=config.optimizer.lr_schedule[0],\n gamma=config.optimizer.lr_factor,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n elif config.optimizer.lr_scheduler == \"cosine\":\n lr_scheduler = CosineAnnealingLRWithWarmup(\n optimizer,\n T_max=config.train.epochs,\n eta_min=0,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n else:\n raise NotImplementedError(f\"Scheduler {config.optimizer.lr_scheduler}\")\n\n return lr_scheduler" }, { "identifier": "ClusterLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class ClusterLoss(nn.Module):\n def __init__(self, class_specific=True):\n super().__init__()\n self.class_specific = class_specific\n\n def forward(self, min_distances: Tensor, target: Tensor, model: ProtoNet) -> Tensor:\n # min_distances: N x P\n if self.class_specific:\n # prototypes_of_correct_class: batch_size x num_prototypes\n prototypes_of_correct_class = torch.t(model.prototype_class_identity[:, target])\n min_distances_target = torch.where(\n prototypes_of_correct_class.bool(),\n min_distances,\n torch.tensor(torch.inf, dtype=min_distances.dtype, device=min_distances.device),\n )\n min_min_distances, _ = torch.min(min_distances_target, dim=1)\n cluster_loss = torch.mean(min_min_distances)\n else:\n min_min_distances, _ = torch.min(min_distances, dim=1)\n cluster_loss = torch.mean(min_min_distances)\n\n return cluster_loss" }, { "identifier": "L1ReadoutLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class L1ReadoutLoss(nn.Module):\n def __init__(self, class_specific=True):\n super().__init__()\n self.class_specific = class_specific\n\n def forward(self, model: ProtoNet) -> Tensor:\n last_layer = model.last_layer\n if isinstance(last_layer, GroupedLinear):\n l1_loss = last_layer.weight.norm(p=1)\n else:\n if self.class_specific:\n l1_mask = 1 - torch.t(model.prototype_class_identity)\n l1_loss = (last_layer.weight * l1_mask).norm(p=1)\n else:\n l1_loss = last_layer.weight.norm(p=1)\n\n return l1_loss" }, { "identifier": "SeparationLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class SeparationLoss(nn.Module):\n @staticmethod\n def forward(min_distances: Tensor, target: Tensor, model: ProtoNet, return_avg: bool = False):\n \"\"\"\n Here we want to maximize the minimum of all minimum proto-patch\n distances (each being some patch that is closest to a given prototype)\n for each non-class prototype. In effect, for each sample, a patch is\n selected for each non-class prototype according to minimum distance. 
So,\n we end up with one patch and one prototype per sample after taking the\n minimum of the proto-patch distances.\n \"\"\"\n # min_distances: N x P\n # prototype_class_identity: P x C\n # prototypes_of_correct_class: N x P\n prototypes_of_correct_class = torch.t(model.prototype_class_identity[:, target]).bool()\n min_distances_nontarget = torch.where(\n prototypes_of_correct_class.bool(),\n torch.tensor(torch.inf, dtype=min_distances.dtype, device=min_distances.device),\n min_distances,\n )\n dists_to_nontarget_prototypes, _ = torch.min(min_distances_nontarget, dim=1)\n separation_loss = -torch.mean(dists_to_nontarget_prototypes)\n\n if not return_avg:\n return separation_loss\n # otherwise\n min_distances_nontarget = torch.where(\n prototypes_of_correct_class.bool(),\n torch.tensor(0, dtype=min_distances.dtype, device=min_distances.device),\n min_distances,\n )\n avg_separation_cost = torch.sum(min_distances_nontarget, dim=1) / torch.sum(\n ~prototypes_of_correct_class.bool(), dim=1\n )\n avg_separation_cost = -torch.mean(avg_separation_cost)\n return separation_loss, avg_separation_cost" }, { "identifier": "ProtoNet", "path": "pixpnet/protonets/models/protonet.py", "snippet": "class ProtoNet(nn.Module):\n # Buffers\n ones: torch.Tensor\n corresponding_sample_idxs: torch.Tensor\n min_fmap_idxs: torch.Tensor\n prototype_class_identity: Optional[torch.Tensor]\n # Parameters\n prototype_vectors: torch.nn.Parameter\n\n # Constants\n prototype_layer_stride = 1\n\n def __init__(\n self,\n features: nn.Module,\n feature_layer: str,\n rf_slices: Optional[SlicesType],\n num_prototypes: int,\n prototype_dim: int,\n prototype_kernel_size: int,\n num_classes: int,\n init_weights: bool = True,\n prototype_activation: Union[str, Callable] = \"log\",\n add_on_layers_type: str = \"regular\",\n class_specific: bool = True,\n epsilon: float = 1e-6,\n learn_prototypes: bool = True,\n incorrect_strength: float = -0.5,\n correct_strength: float = 1,\n readout_type: str = \"linear\",\n distance: str = \"l2\",\n ):\n \"\"\"\"\"\"\n super().__init__()\n self.prototype_shape = (num_prototypes, prototype_dim, prototype_kernel_size, prototype_kernel_size)\n self.num_prototypes = num_prototypes\n self.prototype_dim = prototype_dim\n self.prototype_kernel_size = prototype_kernel_size\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.learn_prototypes = learn_prototypes\n # prototype_activation could be 'log', 'linear',\n # or a callable that converts distance to similarity score\n self.prototype_activation = prototype_activation\n self.distance = distance\n self.feature_layer = feature_layer\n\n self.rf_slices = rf_slices\n self.rf_idxs = None\n self.rf_sizes = None\n if self.rf_slices is not None:\n Hz = len(self.rf_slices)\n Wz = len(self.rf_slices[0])\n self.rf_sizes = torch.zeros((Hz, Wz, 2), dtype=torch.int)\n self.rf_idxs = torch.zeros((Hz, Wz, 4), dtype=torch.int)\n for h in range(Hz):\n for w in range(Wz):\n # for patch h,w\n if len(self.rf_slices[h][w]) > 1:\n raise NotImplementedError\n for h_s, w_s in self.rf_slices[h][w]:\n # Start weighting approach\n h_size = h_s.stop - h_s.start\n w_size = w_s.stop - w_s.start\n self.rf_sizes[h, w] = torch.tensor([h_size, w_size], dtype=torch.int)\n self.rf_idxs[h, w] = torch.tensor([h_s.start, h_s.stop, w_s.start, w_s.stop], dtype=torch.int)\n\n self.incorrect_strength = incorrect_strength\n self.correct_strength = correct_strength\n self.class_specific = class_specific\n if self.class_specific:\n # Here we are initializing the class identities of the 
prototypes.\n # Without domain specific knowledge we allocate the same number of\n # prototypes for each class\n assert self.num_prototypes % self.num_classes == 0\n # a one-hot indication matrix for each prototype's class identity\n self.register_buffer(\n \"prototype_class_identity\", torch.zeros(self.num_prototypes, self.num_classes, dtype=torch.int)\n )\n num_prototypes_per_class = self.num_prototypes // self.num_classes\n for j in range(self.num_prototypes):\n self.prototype_class_identity[j, j // num_prototypes_per_class] = 1\n\n # this has to be named features to allow the precise loading\n self.features = features\n self._init_add_on_layers(add_on_layers_type)\n\n self.register_parameter(\n \"prototype_vectors\", nn.Parameter(torch.rand(self.prototype_shape), requires_grad=learn_prototypes)\n )\n self.register_buffer(\"ones\", torch.ones(self.prototype_shape))\n self.register_buffer(\"corresponding_sample_idxs\", torch.full((self.num_prototypes,), -1))\n self.register_buffer(\"min_fmap_idxs\", torch.full((self.num_prototypes, 4), -1))\n\n self.readout_type = readout_type\n self._init_last_layer()\n\n if init_weights:\n self._initialize_weights()\n\n def _init_last_layer(self):\n # do not use bias to aid interpretability\n if self.readout_type == \"linear\": # standard linear\n self.last_layer = nn.Linear(self.num_prototypes, self.num_classes, bias=False)\n elif self.readout_type == \"sparse\": # sparse linear\n if not self.class_specific:\n raise ValueError('`readout_type` cannot be \"sparse\" if ' \"`class_specific` is False\")\n self.last_layer = GroupedLinear(self.num_prototypes, self.num_classes, groups=self.num_classes, bias=False)\n elif self.readout_type == \"proto\": # prototype sim sums as prediction\n if not self.class_specific:\n raise ValueError('`readout_type` cannot be \"proto\" if ' \"`class_specific` is False\")\n # Note that this assumes that `prototype_class_identity` is still\n # uniform across classes when class_specific is True\n self.last_layer = GroupedSum(self.num_prototypes, self.num_classes)\n else:\n raise NotImplementedError(f\"readout_type = {self.readout_type}\")\n\n def _init_add_on_layers(self, add_on_layers_type):\n in_channels = self.features.out_channels\n\n final_act, final_act_str = nn.Sigmoid(), \"sigmoid\"\n if add_on_layers_type == \"bottleneck\":\n add_on_layers = []\n current_in_channels = in_channels\n conv_idx = 1\n while current_in_channels > self.prototype_dim or not len(add_on_layers):\n current_out_channels = max(self.prototype_dim, (current_in_channels // 2))\n if current_out_channels > self.prototype_dim:\n conv2_str, act2, act2_str = (f\"conv{conv_idx + 1}\", nn.ReLU(), f\"relu{conv_idx + 1}\")\n else:\n assert current_out_channels == self.prototype_dim\n conv2_str, act2, act2_str = (\"conv_last\", final_act, final_act_str)\n add_on_layers.extend(\n (\n (\n f\"conv{conv_idx}\",\n nn.Conv2d(\n in_channels=current_in_channels, out_channels=current_out_channels, kernel_size=1\n ),\n ),\n (f\"relu{conv_idx}\", nn.ReLU()),\n (\n conv2_str,\n nn.Conv2d(\n in_channels=current_out_channels, out_channels=current_out_channels, kernel_size=1\n ),\n ),\n (act2_str, act2),\n )\n )\n current_in_channels = current_in_channels // 2\n conv_idx += 2\n elif add_on_layers_type == \"regular\":\n add_on_layers = (\n (\"conv1\", nn.Conv2d(in_channels=in_channels, out_channels=self.prototype_dim, kernel_size=1)),\n (\"relu1\", nn.ReLU()),\n (\n \"conv_last\",\n nn.Conv2d(in_channels=self.prototype_dim, out_channels=self.prototype_dim, kernel_size=1),\n 
),\n (final_act_str, final_act),\n )\n else:\n raise ValueError(add_on_layers_type)\n add_on_layers = OrderedDict(add_on_layers)\n\n self.add_on_layers = nn.Sequential(add_on_layers)\n\n def conv_features(self, x):\n \"\"\"\n the feature input to prototype layer\n \"\"\"\n x = self.features(x)\n log_once(logger.info, f'features output shape: {(\"N\", *x.size()[1:])}')\n x = self.add_on_layers(x)\n log_once(logger.info, f'add_on_layers output shape: {(\"N\", *x.size()[1:])}')\n return x\n\n def compute_distances(self, x):\n return compute_distances(self.distance, x, self.prototype_vectors, self.ones)\n\n def prototype_distances(self, x):\n \"\"\"\n x is the raw input\n \"\"\"\n conv_features = self.conv_features(x)\n distances = self.compute_distances(conv_features)\n return conv_features, distances\n\n def dist_2_sim(self, distances):\n if self.prototype_activation == \"log\":\n # equivalent:\n # log((distances + 1) / (distances + epsilon)) # noqa: E800\n # but this one is numerically more accurate\n return torch.log(1 / (distances + self.epsilon) + 1)\n elif self.prototype_activation == \"linear\":\n if self.distance == \"cosine\":\n # dists = 1 - sim --> sim = 1 - dists\n return 1 - distances\n else:\n return -distances\n else:\n return self.prototype_activation(distances)\n\n def forward(self, x, return_features=False):\n result = self.prototype_distances(x)\n conv_features, distances = result\n outputs = self.classify_head(x, distances)\n if return_features:\n outputs[\"features\"] = conv_features\n return outputs\n\n def classify_head(self, x, distances):\n return self._classify_head_proto2patch(distances)\n\n def pixel_space_map(self, x_i, proto_dists, sigma_factor=1.0):\n # Note: one sample at a time! otherwise there will definitely be\n # memory issues on most hardware and ProtoNets\n dtype = proto_dists.dtype\n device = proto_dists.device\n\n # validate shape\n if x_i.ndim == 4:\n assert x_i.shape[0] == 1, x_i.shape\n x_i = torch.squeeze(x_i, 0)\n else:\n assert x_i.ndim == 3, x_i.shape\n\n if proto_dists.ndim == 4:\n assert proto_dists.shape[0] == 1, proto_dists.shape\n proto_dists = torch.squeeze(proto_dists, 0)\n else:\n assert proto_dists.ndim == 3, proto_dists.shape\n\n C, H, W = x_i.shape\n P, Hz, Wz = proto_dists.shape\n\n # dists --> sims\n proto_sims = self.dist_2_sim(proto_dists)\n # Sim maps\n heat_map_max = torch.zeros((P, H, W), dtype=dtype, device=device)\n heat_map_avg = torch.zeros_like(heat_map_max)\n heat_map_counts = torch.zeros_like(heat_map_avg, dtype=torch.int)\n\n rf_h = self.rf_sizes[:, :, 0].max()\n rf_w = self.rf_sizes[:, :, 1].max()\n\n do_super_rfs = rf_h >= H or rf_w >= W\n if do_super_rfs:\n # increase true rf_h/w\n where_big = torch.where((self.rf_sizes[:, :, 0] >= H) | (self.rf_sizes[:, :, 1] >= W))\n do_super_rfs = len(where_big[0]) > 1\n if do_super_rfs:\n # linear stretching assumption for super-100% RF networks\n naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()\n naive_midpoints_w = torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()\n\n im_midpoints = (H - 1) / 2, (W - 1) / 2\n\n pad_h = torch.round((im_midpoints[0] - naive_midpoints_h[where_big[0]]).abs().max()).int()\n pad_w = torch.round((im_midpoints[1] - naive_midpoints_w[where_big[1]]).abs().max()).int()\n\n # increase the RFs by the discovered padding amount\n rf_h = rf_h + 2 * pad_h\n rf_w = rf_w + 2 * pad_w\n\n k_size = max(rf_h, rf_w)\n sigma = k_size * sigma_factor\n g_kern = gaussian_kernel(k_size, sigma=sigma, device=device)\n\n for h in range(Hz):\n for w 
in range(Wz):\n # for patch h,w\n sims_hw = proto_sims[:, h, w][:, None, None] # P x 1 x 1\n h_size, w_size = self.rf_sizes[h, w] # rf_sizes: Hz x Wz x 2\n\n hs0, hs1, ws0, ws1 = self.rf_idxs[h, w]\n\n if do_super_rfs:\n mh, mw = naive_midpoints_h[h], naive_midpoints_w[w]\n\n hs0_ = mh - rf_h // 2\n hs1_ = mh + ceil(rf_h // 2)\n ws0_ = mw - rf_w // 2\n ws1_ = mw + ceil(rf_w // 2)\n\n h_pad0 = max(-hs0_, 0)\n h_pad1 = max(hs1_ - H - max(hs0_, 0), 0)\n w_pad0 = max(-ws0_, 0)\n w_pad1 = max(ws1_ - W - max(ws0_, 0), 0)\n\n if h_size < H:\n if hs0 != 0:\n h_pad0 += H - h_size\n else:\n h_pad1 += H - h_size\n if w_size < W:\n if ws0 != 0:\n w_pad0 += W - w_size\n else:\n w_pad1 += W - w_size\n\n g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]\n else:\n h_pad0 = h_pad1 = 0\n w_pad0 = w_pad1 = 0\n if h_size < rf_h:\n if hs1 - rf_h < 0:\n h_pad0 += rf_h - h_size\n else:\n h_pad1 += rf_h - h_size\n if w_size < rf_w:\n if ws1 - rf_w < 0:\n w_pad0 += rf_w - w_size\n else:\n w_pad1 += rf_w - w_size\n g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]\n\n sims_hw_full = sims_hw * g_kern_hw[None, :, :]\n\n heat_map_avg[:, hs0:hs1, ws0:ws1] += sims_hw_full\n heat_map_counts[:, hs0:hs1, ws0:ws1] += 1\n heat_map_max[:, hs0:hs1, ws0:ws1] = torch.maximum(sims_hw_full, heat_map_max[:, hs0:hs1, ws0:ws1])\n # take element-wise averages according to overlap tensor (counts)\n heat_map_sum = heat_map_avg.clone()\n heat_map_avg /= heat_map_counts\n\n return heat_map_max, heat_map_avg, heat_map_sum # each is P x H x W\n\n def pixel_space_upscale(self, x_i, proto_dists):\n # validate shape\n if x_i.ndim == 4:\n assert x_i.shape[0] == 1, x_i.shape\n x_i = torch.squeeze(x_i, 0)\n else:\n assert x_i.ndim == 3, x_i.shape\n\n if proto_dists.ndim == 4:\n assert proto_dists.shape[0] == 1, proto_dists.shape\n proto_dists = torch.squeeze(proto_dists, 0)\n else:\n assert proto_dists.ndim == 3, proto_dists.shape\n\n C, H, W = x_i.shape\n\n # dists --> sims\n proto_sims = self.dist_2_sim(proto_dists)\n # Sim maps\n heat_map = torch.nn.functional.interpolate(proto_sims[None], (H, W), mode=\"bicubic\")\n # 1 x P x H x W --> P x H x W\n heat_map = heat_map.squeeze(dim=0)\n\n return heat_map\n\n def pixel_space_bboxes(self, min_dist_idxs, proto_dists):\n if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):\n raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))\n N, P = min_dist_idxs.shape\n # N x P, N x P\n fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, proto_dists.shape[-2:])\n\n bboxes = []\n for i in range(N):\n bboxes_i = []\n for j in range(P):\n h, w = fmap_h_start[i, j], fmap_w_start[i, j]\n slices_hw = self.rf_slices[h][w]\n assert len(slices_hw) == 1, \"unsupported at the moment\"\n slice_h, slice_w = slices_hw[0]\n x1, y1 = slice_w.start, slice_h.start\n x2, y2 = slice_w.stop, slice_h.stop\n bboxes_i.append([x1, y1, x2, y2])\n bboxes.append(bboxes_i)\n bboxes = torch.tensor(bboxes)\n return bboxes # N x P x 4\n\n def pixel_space_centers_upscale(self, x, min_dist_idxs, proto_dists):\n if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):\n raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))\n _, _, H, W = x.shape\n Hz, Wz = proto_dists.shape[-2:]\n # N x P, N x P\n fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, [Hz, Wz])\n\n naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()\n naive_midpoints_w 
= torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()\n\n centers_x = naive_midpoints_w[fmap_w_start.cpu()]\n centers_y = naive_midpoints_h[fmap_h_start.cpu()]\n\n return centers_x, centers_y # NxP each\n\n def _classify_head_proto2patch(self, distances):\n # global min pooling (N x P x H x W --> N x P x 1 x 1)\n # I.e., the KxK patch of the latent representations z of the input\n # images that is most similar to each of the P prototypes. Output\n # indicates how present each prototype is in the image.\n min_distances, min_dist_idxs = self.global_min_pool(distances)\n # Convert distances to similarity using the log/linear function\n prototype_activations = self.dist_2_sim(min_distances)\n\n # Compute logits (N x C)\n logits = self.last_layer(prototype_activations)\n\n return {\n \"logits\": logits, # N x C\n \"min_distances\": min_distances, # N x P\n \"min_dist_idxs\": min_dist_idxs, # N x P\n \"distances\": distances, # N x P x H x W\n \"max_similarities\": prototype_activations, # N x P\n }\n\n @staticmethod\n def global_min_pool(distances):\n \"\"\"\n To gather `min_distances` using `min_dist_idxs`:\n\n ```python\n distances.flatten(start_dim=2).gather(\n dim=2, index=min_dist_idxs.flatten(start_dim=2)\n ).view_as(min_dist_idxs)\n ```\n\n :param distances:\n :return:\n \"\"\"\n with warnings.catch_warnings():\n # You'd think they would've checked for positionally passed args...\n warnings.filterwarnings(\n \"ignore\", \".*order of the arguments: ceil_mode and \" \"return_indices will change.*\", UserWarning\n )\n min_distances, min_dist_idxs = F.max_pool2d(\n -distances, kernel_size=(distances.size()[2], distances.size()[3]), return_indices=True\n )\n min_distances = -min_distances\n # N x P x 1 x 1 --> N x P\n min_distances = min_distances.view(min_distances.shape[0], min_distances.shape[1])\n min_dist_idxs = min_dist_idxs.view(min_dist_idxs.shape[0], min_dist_idxs.shape[1])\n return min_distances, min_dist_idxs\n\n def push_forward(self, x):\n \"\"\"this method is needed for the pushing operation\"\"\"\n return self.prototype_distances(x)\n\n def set_prototypes(self, new_prototype_vectors, corresponding_sample_idxs=None, min_fmap_idxs=None):\n self.prototype_vectors.data.copy_(new_prototype_vectors)\n err_msg = \"both min_fmap_idxs and corresponding_sample_idxs should be\" \" None or not None\"\n if corresponding_sample_idxs is not None:\n assert min_fmap_idxs is not None, err_msg\n self.corresponding_sample_idxs = corresponding_sample_idxs\n self.min_fmap_idxs = min_fmap_idxs\n else:\n assert min_fmap_idxs is None, err_msg\n\n def prune_prototypes(self, prototypes_to_prune):\n \"\"\"\n prototypes_to_prune: a list of indices each in\n [0, current number of prototypes - 1] that indicates the prototypes to\n be removed\n \"\"\"\n prototypes_to_keep = [*({*range(self.num_prototypes)} - {*prototypes_to_prune})]\n\n self.register_parameter(\n \"prototype_vectors\",\n nn.Parameter(self.prototype_vectors.data[prototypes_to_keep, ...], requires_grad=self.learn_prototypes),\n )\n self.corresponding_sample_idxs = self.corresponding_sample_idxs[prototypes_to_keep, ...]\n self.min_fmap_idxs = self.min_fmap_idxs[prototypes_to_keep, ...]\n\n self.prototype_shape = tuple(self.prototype_vectors.size())\n self.num_prototypes = self.prototype_shape[0]\n\n # changing self.last_layer in place\n # changing in_features and out_features make sure the numbers are\n # consistent\n if self.readout_type != \"linear\":\n raise NotImplementedError(\n f\"Removing prototypes for readout_type={self.readout_type}\" 
f\" is not implemented yet\"\n )\n self.last_layer.in_features = self.num_prototypes\n self.last_layer.out_features = self.num_classes\n self.last_layer.weight.data = self.last_layer.weight.data[:, prototypes_to_keep]\n\n # self.ones is nn.Parameter\n self.ones = self.ones[prototypes_to_keep, ...]\n\n # self.prototype_class_identity is torch tensor\n # so it does not need .data access for value update\n if self.class_specific:\n self.prototype_class_identity = self.prototype_class_identity[prototypes_to_keep, :]\n\n def set_last_layer_incorrect_connection(self):\n \"\"\"\n Initialize weight of last_layer to correct_strength if\n prototype_class_identity is 1 (i.e., the prototype is for that class),\n and to incorrect_strength if prototype_class_identity is 0 (i.e., the\n prototype is not for that class)\n \"\"\"\n positive_one_weights_locations = torch.t(self.prototype_class_identity)\n negative_one_weights_locations = 1 - positive_one_weights_locations\n\n self.last_layer.weight.data.copy_(\n self.correct_strength * positive_one_weights_locations\n + self.incorrect_strength * negative_one_weights_locations\n )\n\n def _initialize_weights(self):\n for name, m in self.add_on_layers.named_children():\n if isinstance(m, nn.Conv2d):\n if name == \"conv_last\":\n # for the sigmoid activation\n nn.init.xavier_normal_(m.weight, gain=1.0)\n else:\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if self.class_specific and self.readout_type == \"linear\":\n # This is not needed (or valid) for sparse linear or proto\n self.set_last_layer_incorrect_connection()\n elif self.class_specific and self.readout_type == \"sparse\":\n nn.init.ones_(self.last_layer.weight)" }, { "identifier": "protonet", "path": "pixpnet/protonets/models/protonet.py", "snippet": "def protonet(\n feature_extractor,\n feature_layer=None,\n pretrained=True,\n num_prototypes=2000,\n prototype_dim=512,\n prototype_kernel_size=1,\n num_classes=200,\n input_size=224,\n init_weights=True,\n prototype_activation: Union[str, Callable] = \"log\",\n add_on_layers_type=\"regular\",\n class_specific=True,\n epsilon=1e-6,\n learn_prototypes=True,\n incorrect_strength=-0.5,\n correct_strength=1,\n readout_type=\"linear\",\n distance=\"l2\",\n):\n \"\"\"\"\"\"\n if isinstance(feature_extractor, str):\n last_module_name = []\n if feature_layer:\n last_module_name.append(feature_layer)\n if len(last_module_name) == 1:\n last_module_name = last_module_name[0]\n features = get_feature_extractor(\n feature_extractor,\n pretrained=pretrained,\n last_module_name=last_module_name or None,\n )\n _, rf_data = compute_rf_data(feature_extractor, input_size, input_size, num_classes=1)\n rf_layer = rf_data[feature_layer]\n h_z, w_z = rf_layer.shape[-2:]\n rf_slices = []\n for h in range(h_z):\n slices_h = []\n for w in range(w_z):\n rf_feat_hw = take_rf_from_bbox(\n rf_layer, h, w, prototype_kernel_size, prototype_kernel_size, ProtoNet.prototype_layer_stride\n )\n slices_hw = []\n for slice_hw in rf_feat_hw.as_slices(all_channels=True):\n _, _, h_s, w_s = slice_hw\n slices_hw.append((h_s, w_s))\n slices_h.append(slices_hw)\n rf_slices.append(slices_h)\n\n else:\n features = feature_extractor\n rf_slices = None\n\n if feature_layer is None:\n feature_layer = features.last_module_name[0] if features.multi_output else features.last_module_name\n\n return ProtoNet(\n 
features=features,\n feature_layer=feature_layer,\n rf_slices=rf_slices,\n num_prototypes=num_prototypes,\n prototype_dim=prototype_dim,\n prototype_kernel_size=prototype_kernel_size,\n num_classes=num_classes,\n init_weights=init_weights,\n prototype_activation=prototype_activation,\n add_on_layers_type=add_on_layers_type,\n class_specific=class_specific,\n epsilon=epsilon,\n learn_prototypes=learn_prototypes,\n incorrect_strength=incorrect_strength,\n correct_strength=correct_strength,\n readout_type=readout_type,\n distance=distance,\n )" }, { "identifier": "push_prototypes", "path": "pixpnet/protonets/push.py", "snippet": "def push_prototypes(\n dataloader: SubsetWithIdx, protonet, class_specific=True, preprocess_func=None, duplicate_filter=\"sample\"\n):\n \"\"\"push each prototype to the nearest patch in the training set\"\"\"\n was_training = protonet.training\n protonet.eval()\n\n prototype_shape = protonet.prototype_shape\n n_prototypes = protonet.num_prototypes\n prototype_layer_stride = protonet.prototype_layer_stride\n\n device = protonet.prototype_vectors.device\n dtype = protonet.prototype_vectors.dtype\n\n # saves the closest distance seen so far\n min_proto_dists = torch.full((n_prototypes,), torch.inf, dtype=dtype, device=device)\n # saves the patch representation that gives the current smallest distance\n min_fmap_patches = torch.zeros(prototype_shape, dtype=dtype, device=device)\n # saves the sample indices that each prototype corresponds to in dataloader\n min_sample_idxs = protonet.corresponding_sample_idxs\n # save the feature map indices\n min_fmap_idxs = protonet.min_fmap_idxs\n\n with torch.no_grad():\n # Find the closest training images to each prototype across the entire\n # data set (updates closest each batch to achieve global maximums)\n for sample_idxs, x, y in dataloader:\n x = x.to(device)\n y = y.to(device)\n\n _update_prototypes_on_batch(\n sample_idxs=sample_idxs,\n x=x,\n y=y,\n protonet=protonet,\n min_proto_dists=min_proto_dists,\n min_fmap_patches=min_fmap_patches,\n min_sample_idxs=min_sample_idxs,\n min_fmap_idxs=min_fmap_idxs,\n class_specific=class_specific,\n preprocess_func=preprocess_func,\n proto_layer_stride=prototype_layer_stride,\n duplicate_filter=duplicate_filter,\n )\n\n q = torch.tensor([0, 0.25, 0.50, 0.75, 1], dtype=dtype, device=device)\n dist_percentiles = torch.quantile(min_proto_dists, q).tolist()\n logger.info(\n f\"Prototypes pushing distances stats:\\n\"\n f' {\" / \".join(f\"{x * 100:6.2f}%\" for x in q.tolist())}\\n'\n f' {\" / \".join(f\"{x:7.4f}\" for x in dist_percentiles)}\\n'\n f\" {int(torch.isnan(min_proto_dists).sum())} / \"\n f\"{min_proto_dists.numel()} are NaN\"\n )\n\n # Executing push...\n prototype_update = torch.reshape(min_fmap_patches, prototype_shape)\n\n proto_norm_pre = torch.norm(protonet.prototype_vectors)\n proto_norm_post = torch.norm(prototype_update)\n\n logger.info(\n f\"Prototype vector Frobenius norm pre- and post-push: \" f\"{proto_norm_pre:.4f} --> {proto_norm_post:.4f}\"\n )\n\n protonet.set_prototypes(\n prototype_update, # P x D x K x K\n corresponding_sample_idxs=min_sample_idxs, # P\n min_fmap_idxs=min_fmap_idxs, # P x 4\n )\n\n if was_training:\n protonet.train()" }, { "identifier": "get_logger", "path": "pixpnet/utils.py", "snippet": "def get_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s[%(process)d][%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n logger = logging.getLogger(name)\n logger.setLevel(os.environ.get(\"PIXPNET_LOG_LEVEL\", \"INFO\"))\n 
return logger" }, { "identifier": "intersect_func_and_kwargs", "path": "pixpnet/utils.py", "snippet": "def intersect_func_and_kwargs(func, kwargs, exclude_func_args=None, exclude_kwargs=None, return_invalid=True):\n func_args = {*get_all_func_args(func)} - (set() if exclude_func_args is None else {*exclude_func_args})\n if isinstance(kwargs, argparse.Namespace):\n kwargs = vars(kwargs)\n kwargs_keys = {*kwargs.keys()} - (set() if exclude_kwargs is None else {*exclude_kwargs})\n\n intersecting_keys = kwargs_keys & func_args\n intersected_dict = {k: kwargs[k] for k in intersecting_keys}\n if return_invalid:\n return intersected_dict, kwargs_keys - func_args\n return intersected_dict" } ]
import argparse
import torch
from typing import Tuple
from torch import nn
from torchmetrics import Accuracy
from pytorch_lightning.loops import FitLoop
from pytorch_lightning.loops.fit_loop import _FitLoop as FitLoop
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pixpnet.data import get_metadata
from pixpnet.lightning.lightning_data import LitData
from pixpnet.lightning.lit_module import BaseLitModel
from pixpnet.optim import get_optimizer_cls, get_scheduler
from pixpnet.protonets.loss import ClusterLoss, L1ReadoutLoss, SeparationLoss
from pixpnet.protonets.models.protonet import ProtoNet, protonet
from pixpnet.protonets.push import push_prototypes
from pixpnet.utils import get_logger, intersect_func_and_kwargs
11,497
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later

try:
except ImportError:

logger = get_logger(__name__)


def params_with_grad(parameters):
    return filter(lambda p: p.requires_grad, parameters)


def make_optimizers_proto(
    model: ProtoNet,
    config: argparse.Namespace,
) -> Tuple[torch.optim.Optimizer, ...]:
    """"""
    optimizer_cls, hparams = get_optimizer_cls(config, ignore={"fine_tune_lr", "readout_lr"})
    readout_params = None
    if model.last_layer is not None:
        readout_params = [
            {
                "params": params_with_grad(model.last_layer.parameters()),
                "lr": config.optimizer.readout_lr,
                "weight_decay": 0,
            },
        ]
    all_params = [
        # feature extractor
        {"params": params_with_grad(model.features.parameters()), "lr": config.optimizer.fine_tune_lr},
        # add on layers
        {"params": params_with_grad(model.add_on_layers.parameters())},
        # prototype layers
        {"params": params_with_grad([model.prototype_vectors]), "weight_decay": 0},
    ]
    readout_optimizer = None
    if readout_params is not None:
        all_params += readout_params
        readout_optimizer = optimizer_cls(params=readout_params, **hparams)
    optimizer = optimizer_cls(params=all_params, **hparams)
    return optimizer, readout_optimizer


def _set_grad(model, features=True, add_on_layers=True, prototype_vectors=True, last_layer=True):
    for p in model.features.parameters():
        p.requires_grad = features
    for p in model.add_on_layers.parameters():
        p.requires_grad = add_on_layers
    model.prototype_vectors.requires_grad = prototype_vectors
    if model.last_layer is not None:
        for p in model.last_layer.parameters():
            p.requires_grad = last_layer


def last_only(model):
    _set_grad(model, features=False, add_on_layers=False, prototype_vectors=False)


def warm_only(model):
    _set_grad(model, features=False)


def joint(model):
    _set_grad(model)


class ProtoLitModel(BaseLitModel):
    def __init__(self, config, feature_extractor=None):
        super().__init__(config)
        metadata = get_metadata(config)
        self.num_classes = metadata.output_size
        self.input_size = metadata.input_size
        hparams, invalid_keys = intersect_func_and_kwargs(
            protonet,
            config.model,
            exclude_func_args={"num_classes"},
            exclude_kwargs={"name"},
        )
        if invalid_keys:
            logger.warning(
                f"Will not pass the following invalid model "
                f"hyperparameters to {protonet.__name__}: "
                f'{", ".join(invalid_keys)}'
            )
        logger.info(f"Model hyperparameters for {protonet.__name__}: " f"{hparams}")
        if feature_extractor is not None:
            logger.info(
                f"feature_extractor is not None, ignoring config "
                f'option of {hparams.get("feature_extractor")}'
            )
            hparams["feature_extractor"] = feature_extractor
        self.model = protonet(num_classes=self.num_classes, input_size=self.input_size, **hparams)
        self.lr_scheduler = None
        self.readout_optimizer = self.lr_scheduler_configs = None
        # losses
        self.xent = self._metric_per_split(nn.CrossEntropyLoss)
        class_specific = self.config.model.class_specific
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later

try:
except ImportError:

logger = get_logger(__name__)


def params_with_grad(parameters):
    return filter(lambda p: p.requires_grad, parameters)


def make_optimizers_proto(
    model: ProtoNet,
    config: argparse.Namespace,
) -> Tuple[torch.optim.Optimizer, ...]:
    """"""
    optimizer_cls, hparams = get_optimizer_cls(config, ignore={"fine_tune_lr", "readout_lr"})
    readout_params = None
    if model.last_layer is not None:
        readout_params = [
            {
                "params": params_with_grad(model.last_layer.parameters()),
                "lr": config.optimizer.readout_lr,
                "weight_decay": 0,
            },
        ]
    all_params = [
        # feature extractor
        {"params": params_with_grad(model.features.parameters()), "lr": config.optimizer.fine_tune_lr},
        # add on layers
        {"params": params_with_grad(model.add_on_layers.parameters())},
        # prototype layers
        {"params": params_with_grad([model.prototype_vectors]), "weight_decay": 0},
    ]
    readout_optimizer = None
    if readout_params is not None:
        all_params += readout_params
        readout_optimizer = optimizer_cls(params=readout_params, **hparams)
    optimizer = optimizer_cls(params=all_params, **hparams)
    return optimizer, readout_optimizer


def _set_grad(model, features=True, add_on_layers=True, prototype_vectors=True, last_layer=True):
    for p in model.features.parameters():
        p.requires_grad = features
    for p in model.add_on_layers.parameters():
        p.requires_grad = add_on_layers
    model.prototype_vectors.requires_grad = prototype_vectors
    if model.last_layer is not None:
        for p in model.last_layer.parameters():
            p.requires_grad = last_layer


def last_only(model):
    _set_grad(model, features=False, add_on_layers=False, prototype_vectors=False)


def warm_only(model):
    _set_grad(model, features=False)


def joint(model):
    _set_grad(model)


class ProtoLitModel(BaseLitModel):
    def __init__(self, config, feature_extractor=None):
        super().__init__(config)
        metadata = get_metadata(config)
        self.num_classes = metadata.output_size
        self.input_size = metadata.input_size
        hparams, invalid_keys = intersect_func_and_kwargs(
            protonet,
            config.model,
            exclude_func_args={"num_classes"},
            exclude_kwargs={"name"},
        )
        if invalid_keys:
            logger.warning(
                f"Will not pass the following invalid model "
                f"hyperparameters to {protonet.__name__}: "
                f'{", ".join(invalid_keys)}'
            )
        logger.info(f"Model hyperparameters for {protonet.__name__}: " f"{hparams}")
        if feature_extractor is not None:
            logger.info(
                f"feature_extractor is not None, ignoring config "
                f'option of {hparams.get("feature_extractor")}'
            )
            hparams["feature_extractor"] = feature_extractor
        self.model = protonet(num_classes=self.num_classes, input_size=self.input_size, **hparams)
        self.lr_scheduler = None
        self.readout_optimizer = self.lr_scheduler_configs = None
        # losses
        self.xent = self._metric_per_split(nn.CrossEntropyLoss)
        class_specific = self.config.model.class_specific
self.l1 = self._metric_per_split(L1ReadoutLoss, class_specific=class_specific)
6
2023-12-06 23:49:31+00:00
16k
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" }, { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module = True,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Image to Video Conv\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n 
motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n @property\n def attn_processors(self) -> Dict[str, AttnProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):\n r\"\"\"\n Parameters:\n `processor (`dict` of `AttnProcessor` or `AttnProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n of **all** `CrossAttention` layers.\n In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.:\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n print(f'Set {module}')\n module.set_processor(processor)\n else:\n print(f'Set {module}')\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n mask_sample: torch.FloatTensor,\n masked_sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n image_embeds: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # image to video b c f h w\n sample = torch.cat([sample, mask_sample, masked_sample], dim=1).to(sample.device)\n\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * - 10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # prepare for ip-adapter\n if image_embeds is not None:\n image_embeds = self.encoder_hid_proj(\n image_embeds).to(encoder_hidden_states.dtype)\n encoder_hidden_states = torch.cat(\n [encoder_hidden_states, image_embeds], dim=1)\n\n # pre-process\n # b c f h w\n # 2 4 16 64 64\n sample = self.conv_in(sample)\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = 
cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n return text_model_dict" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path 
= {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n 
new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config, only_decoder=False, only_encoder=False):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n 
layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, 
additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n if only_decoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('decoder') or k.startswith('post_quant')}\n elif only_encoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('encoder') or k.startswith('quant')}\n\n return new_checkpoint" }, { "identifier": "convert_lora_model_level", "path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py", "snippet": "def convert_lora_model_level(state_dict, unet, text_encoder=None, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n \"\"\"convert lora in model level instead of pipeline leval\n \"\"\"\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n assert text_encoder is not None, (\n 'text_encoder must be passed since lora contains text encoder layers')\n curr_layer = text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n # NOTE: load lycon, meybe have bugs :(\n if 'conv_in' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n shape[1] = 4\n curr_layer.weight.data[:, :4, ...] 
+= alpha * (weight_up @ weight_down).view(*shape)\n elif 'conv' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n curr_layer.weight.data += alpha * (weight_up @ weight_down).view(*shape)\n elif len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return unet, text_encoder" }, { "identifier": "prepare_mask_coef_by_statistics", "path": "animatediff/utils/util.py", "snippet": "def prepare_mask_coef_by_statistics(video_length: int, cond_frame: int, sim_range: int):\n assert video_length > 0, \\\n 'video_length should be greater than 0'\n\n assert video_length > cond_frame,\\\n 'video_length should be greater than cond_frame'\n\n range_list = RANGE_LIST\n\n assert sim_range < len(range_list),\\\n f'sim_range type{sim_range} not implemented'\n\n coef = range_list[sim_range]\n coef = coef + ([coef[-1]] * (video_length - len(coef)))\n\n order = [abs(i - cond_frame) for i in range(video_length)]\n coef = [coef[order[i]] for i in range(video_length)]\n\n return coef" } ]
import inspect
import os.path as osp

import numpy as np
import torch
from dataclasses import dataclass
from typing import Callable, List, Optional, Union

from diffusers.configuration_utils import FrozenDict
from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL
from diffusers.pipelines import DiffusionPipeline
from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler,
                                  EulerAncestralDiscreteScheduler,
                                  EulerDiscreteScheduler, LMSDiscreteScheduler,
                                  PNDMScheduler)
from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available,
                             logging)
from diffusers.utils.import_utils import is_xformers_available
from einops import rearrange
from omegaconf import OmegaConf
from packaging import version
from safetensors import safe_open
from tqdm import tqdm
from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer,
                          CLIPVisionModelWithProjection)

from animatediff.models.resnet import InflatedConv3d
from animatediff.models.unet import UNet3DConditionModel
from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint,
                                                 convert_ldm_unet_checkpoint,
                                                 convert_ldm_vae_checkpoint)
from animatediff.utils.convert_lora_safetensor_to_diffusers import \
    convert_lora_model_level
from animatediff.utils.util import prepare_mask_coef_by_statistics
from accelerate import cpu_offload
13276
class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], # memory_format: torch.memory_format, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_mode: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maitain encoder as original. """ # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], # memory_format: torch.memory_format, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_mode: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maitain encoder as original. """ # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias
new_conv1 = InflatedConv3d(
0
2023-12-21 03:29:34+00:00
16k
xinghaochen/TinySAM
tinysam/hierarchical_mask_generator.py
[ { "identifier": "Sam", "path": "tinysam/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "tinysam/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n #import pdb; pdb.set_trace()\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "tinysam/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an 
unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "tinysam/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "tinysam/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "tinysam/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "tinysam/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "tinysam/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "tinysam/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "tinysam/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "tinysam/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "tinysam/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "tinysam/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n 
out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "tinysam/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "tinysam/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
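The RLE helpers listed in the context above are easy to sanity-check with a tiny round trip. The sketch below is illustrative only: it assumes the `tinysam` package from this record is importable as `tinysam.utils.amg`, and the toy mask and numbers are made up, not taken from the row.

import torch
# Assumes the tinysam package from the record above is installed/importable.
from tinysam.utils.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask

# Toy 1x4x6 boolean mask with a 2x3 foreground patch.
mask = torch.zeros(1, 4, 6, dtype=torch.bool)
mask[0, 1:3, 2:5] = True

rle = mask_to_rle_pytorch(mask)[0]   # uncompressed, pycocotools-style run lengths
print(rle["size"], rle["counts"])    # [4, 6] and [9, 2, 2, 2, 2, 2, 5]
print(area_from_rle(rle))            # 6 foreground pixels
assert (rle_to_mask(rle) == mask[0].numpy()).all()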
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,166
# Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask": data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]] else: data["segmentations"] = data["rles"] # Write mask records curr_anns = [] for idx in range(len(data["segmentations"])): ann = { "segmentation": data["segmentations"][idx], "area": area_from_rle(data["rles"][idx]), "bbox": box_xyxy_to_xywh(data["boxes"][idx]).tolist(), "predicted_iou": data["iou_preds"][idx].item(), "point_coords": [data["points"][idx].tolist()], "stability_score": data["stability_score"][idx].item(), } curr_anns.append(ann) # print("post use time: {}".format(time.time() - st)) return curr_anns @staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
# Copyright 2023 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ class SamHierarchicalMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, high_score_thresh: float = 8.5, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. high_score_thresh (float): A filtering threshold in [-inf,inf], to find out the unmasked area for the next generation. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. 
Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_side = points_per_side self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.high_score_thresh = high_score_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode def set_point_grids(self, point_grids): self.point_grids = point_grids def set_points_per_side(self, points_per_side): self.point_grids = build_all_layer_point_grids( points_per_side, 0, 1, ) @torch.no_grad() def set_image(self, image: np.ndarray) -> MaskData: # Crop the image and calculate embeddings self.predictor.set_image(image) @torch.no_grad() def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]: self.set_image(image) self.set_points_per_side(self.points_per_side // 4) ori_masks, or_results = self.generate(image, True) ih, iw, _ = image.shape hstride = ih // self.points_per_side wstride = iw // self.points_per_side new_points = [] pass_counter = 0 full_point_grids = np.array(self.point_grids) for mask in range(full_point_grids.shape[1]): point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih] for sy in [-1, 0, 1]: for sx in [-1, 0, 1]: if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[0] + wstride * 2 < iw: for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = 
image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask": data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]] else: data["segmentations"] = data["rles"] # Write mask records curr_anns = [] for idx in range(len(data["segmentations"])): ann = { "segmentation": data["segmentations"][idx], "area": area_from_rle(data["rles"][idx]), "bbox": box_xyxy_to_xywh(data["boxes"][idx]).tolist(), "predicted_iou": data["iou_preds"][idx].item(), "point_coords": [data["points"][idx].tolist()], "stability_score": data["stability_score"][idx].item(), } curr_anns.append(ann) # print("post use time: {}".format(time.time() - st)) return curr_anns 
@staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
mask, changed = remove_small_regions(mask, min_area, mode="holes")
13
2023-12-19 11:25:54+00:00
16k
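The stability-score filter applied inside `generate` in the record above is small enough to check by hand. This is a minimal standalone sketch that mirrors `calculate_stability_score` from `tinysam/utils/amg.py`; the logit map, threshold, and offset values are illustrative, not taken from the row.

import torch

def stability_score(mask_logits: torch.Tensor, mask_threshold: float, offset: float) -> torch.Tensor:
    # IoU of the masks obtained by thresholding the logits high vs. low,
    # mirroring calculate_stability_score in tinysam/utils/amg.py.
    intersections = (mask_logits > (mask_threshold + offset)).sum(-1).sum(-1).float()
    unions = (mask_logits > (mask_threshold - offset)).sum(-1).sum(-1).float()
    return intersections / unions

# One 4x4 "logit" map: three confident foreground pixels, one borderline pixel.
logits = torch.tensor([[[ 3.0,  3.0, -3.0, -3.0],
                        [ 3.0,  0.5, -3.0, -3.0],
                        [-3.0, -3.0, -3.0, -3.0],
                        [-3.0, -3.0, -3.0, -3.0]]])
scores = stability_score(logits, mask_threshold=0.0, offset=1.0)
print(scores)          # tensor([0.7500])
keep = scores >= 0.95  # same comparison the generator makes against stability_score_thresh
print(keep)            # tensor([False])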
DeepWok/mase
machop/chop/passes/graph/transforms/verilog/emit_tb.py
[ { "identifier": "emit_data_in_tb_sv", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_in.py", "snippet": "def emit_data_in_tb_sv(data_width, load_path, out_file):\n buff = f\"\"\"\n`timescale 1 ns / 1 ps\n\nmodule AESL_autofifo_data_in_V (\n clk,\n reset,\n if_empty_n,\n if_read,\n if_dout,\n if_full_n,\n if_write,\n if_din,\n ready,\n done\n);\n\n //------------------------Parameter----------------------\n localparam TV_IN = \"{load_path}\";\n\n //------------------------Local signal-------------------\n parameter DATA_WIDTH = 32'd{data_width};\n parameter ADDR_WIDTH = 32'd1;\n parameter DEPTH = 32'd1;\n\n // Input and Output\n input clk;\n input reset;\n input if_write;\n input [DATA_WIDTH - 1 : 0] if_din;\n output if_full_n;\n input if_read;\n output [DATA_WIDTH - 1 : 0] if_dout;\n output if_empty_n;\n input ready;\n input done;\n\n // Inner signals\n reg [DATA_WIDTH - 1 : 0] mem[0 : DEPTH - 1];\n initial begin : initialize_mem\n integer i;\n for (i = 0; i < DEPTH; i = i + 1) begin\n mem[i] = 0;\n end\n end\n reg [ADDR_WIDTH : 0] mInPtr = 0;\n reg [ADDR_WIDTH : 0] mOutPtr = 0;\n reg mFlag_hint; // 0: empty hint, 1: full hint\n\n assign if_dout = (mOutPtr >= DEPTH) ? 0 : mem[mOutPtr];\n assign if_empty_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b0)? 1'b 0: 1'b 1;\n assign if_full_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b1)? 1'b 0: 1'b 1;\n\n //------------------------Task and function--------------\n task read_token;\n input integer fp;\n output reg [127 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n //------------------------Read-only fifo-------------------\n\n // Write operation for read_only fifo\n initial begin : read_file_process\n integer fp;\n integer err;\n integer ret;\n integer transaction_idx;\n reg [127 : 0] token;\n reg [8*5 : 1] str;\n reg [DATA_WIDTH - 1 : 0] mem_tmp;\n mInPtr = 0;\n mFlag_hint = 0;\n transaction_idx = 0;\n fp = $fopen(TV_IN, \"r\");\n if (fp == 0) begin // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", TV_IN);\n $finish;\n end\n read_token(fp, token);\n if (token != \"[[[runtime]]]\") begin // Illegal format\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp, token);\n while (token != \"[[[/runtime]]]\") begin\n if (token != \"[[transaction]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp, token); // skip transaction number\n\n @(posedge clk);\n #0.2;\n while (ready !== 1) begin\n @(posedge clk);\n #0.2;\n end\n read_token(fp, token);\n mInPtr = 0;\n while (token != \"[[/transaction]]\") begin\n if (mInPtr >= DEPTH) begin\n $display(\"Fifo overflow!\");\n $finish;\n end\n ret = $sscanf(token, \"0x%x\", mem_tmp);\n mem[mInPtr] = mem_tmp;\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $finish;\n end\n mInPtr = mInPtr + 1;\n read_token(fp, token);\n end\n mFlag_hint = 0;\n read_token(fp, token);\n transaction_idx = transaction_idx + 1;\n end\n $fclose(fp);\n @(posedge clk);\n #0.2;\n while (ready !== 1) begin\n @(posedge clk);\n #0.2;\n end\n mInPtr = 0;\n end\n\n // Read operation for read_only fifo\n always @(posedge clk) begin\n if (reset === 1) begin\n mOutPtr = 0;\n end else if (if_read === 1) begin\n if (mOutPtr < mInPtr) mOutPtr <= mOutPtr + 1;\n end\n end\n\n // Reset mOutPtr when done is pulled up\n initial begin : done_reset_mOutPtr_process\n while (1) begin\n @(posedge clk);\n #0.1;\n while (ready !== 1) begin\n 
@(posedge clk);\n #0.1;\n end\n mOutPtr = 0;\n end\n end\n\nendmodule\n\"\"\"\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" }, { "identifier": "emit_data_in_tb_dat", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_in.py", "snippet": "def emit_data_in_tb_dat(node, data_in, out_file):\n in_size = node.meta[\"mase\"].parameters[\"hardware\"][\"verilog_param\"][\n \"DATA_IN_0_TENSOR_SIZE_DIM_0\"\n ]\n in_width = node.meta[\"mase\"].parameters[\"common\"][\"args\"][\"data_in_0\"][\"precision\"][\n 0\n ]\n assert len(data_in[0]) % in_size == 0\n\n trans = \"\"\"[[transaction]] {}\n{}\n[[/transaction]]\n\"\"\"\n\n data = [x for trans in data_in for x in trans]\n data_buff = \"\"\n trans_count = 0\n value = 0\n for i, d in enumerate(data):\n if in_size == 1 or i % in_size == in_size - 1:\n data_buff += trans.format(trans_count, hex(value))\n trans_count += 1\n value = 0\n else:\n for _ in range(0, i % in_size):\n d = d << in_width\n value = value + d\n\n buff = f\"\"\"[[[runtime]]]\n{data_buff}[[[/runtime]]]\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"" }, { "identifier": "emit_data_out_tb_sv", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_out.py", "snippet": "def emit_data_out_tb_sv(data_width, load_path, store_path, out_file):\n buff = f\"\"\"\n`timescale 1 ns / 1 ps\n\nmodule AESL_autofifo_data_out_V (\n clk,\n reset,\n if_empty_n,\n if_read,\n if_dout,\n if_full_n,\n if_write,\n if_din,\n ready,\n done\n);\n\n //------------------------Parameter----------------------\n localparam\n\tTV_IN\t=\t\"{load_path}\",\n\tTV_OUT\t=\t\"{store_path}\";\n\n //------------------------Local signal-------------------\n parameter DATA_WIDTH = 32'd{data_width};\n parameter ADDR_WIDTH = 32'd1;\n parameter DEPTH = 32'd1;\n\n // Input and Output\n input clk;\n input reset;\n input if_write;\n input [DATA_WIDTH - 1 : 0] if_din;\n output if_full_n;\n input if_read;\n output [DATA_WIDTH - 1 : 0] if_dout;\n output if_empty_n;\n input ready;\n input done;\n\n // Inner signals\n reg [DATA_WIDTH - 1 : 0] mem[0 : DEPTH - 1];\n initial begin : initialize_mem\n integer i;\n for (i = 0; i < DEPTH; i = i + 1) begin\n mem[i] = 0;\n end\n end\n reg [ADDR_WIDTH : 0] mInPtr = 0;\n reg [ADDR_WIDTH : 0] mOutPtr = 0;\n reg mFlag_hint; // 0: empty hint, 1: full hint\n\n assign if_dout = (mOutPtr >= DEPTH) ? 0 : mem[mOutPtr];\n assign if_empty_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b0)? 1'b 0: 1'b 1;\n assign if_full_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b1)? 
1'b 0: 1'b 1;\n\n //------------------------Task and function--------------\n task read_token;\n input integer fp;\n output reg [127 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n //------------------------Write-only fifo-------------------\n\n // Write operation for write-only fifo\n always @(posedge clk) begin\n if (reset === 1) begin\n mInPtr = 0;\n end else if (if_write) begin\n if (mInPtr < DEPTH) begin\n mem[mInPtr] = if_din;\n mInPtr <= mInPtr + 1;\n end\n end\n end\n\n // Reset mInPtr when done is pulled up\n initial begin : done_reset_mInPtr_process\n while (1) begin\n @(posedge clk);\n #0.2;\n while (done !== 1) begin\n @(posedge clk);\n #0.2;\n end\n mInPtr = 0;\n end\n end\n\n // Read operation for write-only fifo\n initial begin : write_file_process\n integer fp;\n integer transaction_idx;\n reg [8*5 : 1] str;\n integer idx;\n transaction_idx = 0;\n mOutPtr = DEPTH;\n mFlag_hint = 1;\n while (1) begin\n @(posedge clk);\n #0.1;\n while (done !== 1) begin\n @(posedge clk);\n #0.1;\n end\n fp = $fopen(TV_OUT, \"a\");\n if (fp == 0) begin // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", TV_OUT);\n $finish;\n end\n $fdisplay(fp, \"[[transaction]] %d\", transaction_idx);\n for (idx = 0; idx < mInPtr; idx = idx + 1) begin\n $fdisplay(fp, \"0x%x\", mem[idx]);\n end\n $fdisplay(fp, \"[[/transaction]]\");\n transaction_idx = transaction_idx + 1;\n $fclose(fp);\n end\n end\n\nendmodule\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Output data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting output data fifo failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" }, { "identifier": "emit_data_out_tb_dat", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_out.py", "snippet": "def emit_data_out_tb_dat(node, data_out, out_file):\n out_size = node.meta[\"mase\"].parameters[\"hardware\"][\"verilog_param\"][\n \"DATA_OUT_0_TENSOR_SIZE_0_DIM_0\"\n ]\n out_width = node.meta[\"mase\"].parameters[\"common\"][\"results\"][\"data_out_0\"][\n \"precision\"\n ][0]\n assert (\n len(data_out[0]) % out_size == 0\n ), f\"Cannot perfectly partition: {len(data_out[0])}/{out_size}\"\n\n trans = \"\"\"[[transaction]] {}\n{}\n[[/transaction]]\n\"\"\"\n\n data = [x for trans in data_out for x in trans]\n data_buff = \"\"\n trans_count = 0\n value = 0\n for i, d in enumerate(data):\n if out_size == 1 or i % out_size == out_size - 1:\n data_buff += trans.format(trans_count, hex(value))\n trans_count += 1\n value = 0\n else:\n for _ in range(0, i % out_size):\n d = d << out_width\n value = value + d\n\n buff = f\"\"\"[[[runtime]]]\n{data_buff}[[[/runtime]]]\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"" }, { "identifier": "emit_top_tb", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_testbench.py", "snippet": "def emit_top_tb(\n tv_dir,\n top_name,\n out_file,\n in_width,\n in_size,\n out_width,\n out_size,\n in_trans_num,\n out_trans_num,\n):\n sw_data_in = os.path.join(tv_dir, \"sw_data_in.dat\")\n sw_data_out = os.path.join(tv_dir, \"sw_data_out.dat\")\n hw_data_out = os.path.join(tv_dir, \"hw_data_out.dat\")\n hw_stream_size = os.path.join(tv_dir, \"data_in_stream_size.dat\")\n\n buff = f\"\"\"\n`timescale 1ns / 
1ps\n\n\n`define AUTOTB_DUT {top_name}\n`define AUTOTB_DUT_INST AESL_inst_{top_name}\n`define AUTOTB_TOP {top_name}_tb\n`define AUTOTB_LAT_RESULT_FILE \"{top_name}.result.lat.rb\"\n`define AUTOTB_PER_RESULT_TRANS_FILE \"{top_name}.performance.result.transaction.xml\"\n`define AUTOTB_TOP_INST AESL_inst_apatb_{top_name}_top\n`define AUTOTB_MAX_ALLOW_LATENCY 15000000\n`define AUTOTB_CLOCK_PERIOD_DIV2 5.00\n\n`define AESL_FIFO_data_in_V AESL_autofifo_data_in_V\n`define AESL_FIFO_INST_data_in_V AESL_autofifo_inst_data_in_V\n`define AESL_FIFO_data_out_V AESL_autofifo_data_out_V\n`define AESL_FIFO_INST_data_out_V AESL_autofifo_inst_data_out_V\n`define SW_DATA_IN_DAT \"{sw_data_in}\"\n`define SW_DATA_OUT_DAT \"{sw_data_out}\"\n`define HW_DATA_OUT_DAT \"{hw_data_out}\"\nmodule `AUTOTB_TOP;\n\n parameter IN_TRANSACTION_NUM = {in_trans_num};\n parameter OUT_TRANSACTION_NUM = {out_trans_num};\n parameter PROGRESS_TIMEOUT = 10000000;\n parameter LATENCY_ESTIMATION = 0;\n parameter LENGTH_data_in_V = 1;\n parameter LENGTH_data_out_V = 1;\n parameter TOKEN_WIDTH = {max(128, 2*out_width*out_size)+16};\n parameter IN_WIDTH = {in_width};\n parameter IN_SIZE = {in_size};\n parameter OUT_WIDTH = {out_width};\n parameter OUT_SIZE = {out_size};\n\n task read_token;\n input integer fp;\n output reg [TOKEN_WIDTH-1 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n task post_check;\n input integer fp1;\n input integer fp2;\n reg [TOKEN_WIDTH-1 : 0] token1;\n reg [TOKEN_WIDTH-1 : 0] token2;\n reg [TOKEN_WIDTH-1 : 0] golden;\n reg [TOKEN_WIDTH-1 : 0] result;\n integer ret;\n begin\n read_token(fp1, token1);\n read_token(fp2, token2);\n if (token1 != \"[[[runtime]]]\" || token2 != \"[[[runtime]]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n while (token1 != \"[[[/runtime]]]\" && token2 != \"[[[/runtime]]]\") begin\n if (token1 != \"[[transaction]]\" || token2 != \"[[transaction]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1); // skip transaction number\n read_token(fp2, token2); // skip transaction number\n read_token(fp1, token1);\n read_token(fp2, token2);\n while (token1 != \"[[/transaction]]\" && token2 != \"[[/transaction]]\") begin\n ret = $sscanf(token1, \"0x%x\", golden);\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n ret = $sscanf(token2, \"0x%x\", result);\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n if (golden != result) begin\n $display(\"%x (expected) vs. 
%x (actual) - mismatch\", golden, result);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n end\n end\n endtask\n\n reg AESL_clock;\n reg rst;\n reg dut_rst;\n reg start;\n reg ce;\n reg tb_continue;\n wire AESL_start;\n wire AESL_reset;\n wire AESL_ce;\n wire AESL_ready;\n wire AESL_idle;\n wire AESL_continue;\n wire AESL_done;\n reg AESL_done_delay = 0;\n reg AESL_done_delay2 = 0;\n reg AESL_ready_delay = 0;\n wire ready;\n wire ready_wire;\n wire ap_start;\n wire ap_done;\n wire ap_idle;\n wire ap_ready;\n wire [IN_WIDTH*IN_SIZE-1 : 0] data_in_V_dout;\n wire data_in_V_empty_n;\n wire data_in_V_read;\n wire [OUT_WIDTH*OUT_SIZE-1 : 0] data_out_V_din;\n wire data_out_V_full_n;\n wire data_out_V_write;\n integer done_cnt = 0;\n integer AESL_ready_cnt = 0;\n integer ready_cnt = 0;\n reg ready_initial;\n reg ready_initial_n;\n reg ready_last_n;\n reg ready_delay_last_n;\n reg done_delay_last_n;\n reg interface_done = 0;\n\n wire ap_clk;\n wire ap_rst;\n wire ap_rst_n;\n\n wire [IN_WIDTH-1:0] data_in[IN_SIZE-1:0];\n wire [OUT_WIDTH-1:0] data_out[OUT_SIZE-1:0];\n for (genvar i = 0; i < IN_SIZE; i++)\n assign data_in[i] = data_in_V_dout[i*IN_WIDTH+IN_WIDTH-1:i*IN_WIDTH];\n for (genvar i = 0; i < OUT_SIZE; i++)\n assign data_out_V_din[i*OUT_WIDTH+OUT_WIDTH-1:i*OUT_WIDTH] = data_out[i];\n\n `AUTOTB_DUT `AUTOTB_DUT_INST(\n .clk(ap_clk),\n .rst(ap_rst),\n .data_in(data_in),\n .data_in_valid(data_in_V_empty_n),\n .data_in_ready(data_in_V_read),\n .data_out(data_out),\n .data_out_ready(data_out_V_full_n),\n .data_out_valid(data_out_V_write));\n\n assign ap_done = data_out_V_write;\n assign ap_ready = data_out_V_write;\n assign ap_idle = ~ap_start;\n\n // Assignment for control signal\n assign ap_clk = AESL_clock;\n assign ap_rst = dut_rst;\n assign ap_rst_n = ~dut_rst;\n assign AESL_reset = rst;\n assign ap_start = AESL_start;\n assign AESL_start = start;\n assign AESL_done = ap_done;\n assign AESL_idle = ap_idle;\n assign AESL_ready = ap_ready;\n assign AESL_ce = ce;\n assign AESL_continue = tb_continue;\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n end else begin\n if (AESL_done !== 1 && AESL_done !== 0) begin\n $display(\"ERROR: Control signal AESL_done is invalid!\");\n $finish;\n end\n end\n end\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n end else begin\n if (AESL_ready !== 1 && AESL_ready !== 0) begin\n $display(\"ERROR: Control signal AESL_ready is invalid!\");\n $finish;\n end\n end\n end\n // Fifo Instantiation data_in_V\n\n wire fifodata_in_V_rd;\n wire [IN_WIDTH*IN_SIZE-1 : 0] fifodata_in_V_dout;\n wire fifodata_in_V_empty_n;\n wire fifodata_in_V_ready;\n wire fifodata_in_V_done;\n reg [31:0] ap_c_n_tvin_trans_num_data_in_V;\n reg data_in_V_ready_reg;\n\n `AESL_FIFO_data_in_V `AESL_FIFO_INST_data_in_V(\n .clk (AESL_clock),\n .reset (AESL_reset),\n .if_write (),\n .if_din (),\n .if_full_n (),\n .if_read (fifodata_in_V_rd),\n .if_dout (fifodata_in_V_dout),\n .if_empty_n (fifodata_in_V_empty_n),\n .ready (fifodata_in_V_ready),\n .done (fifodata_in_V_done));\n\n // Assignment between dut and fifodata_in_V\n\n // Assign input of fifodata_in_V\n assign fifodata_in_V_rd = data_in_V_read & data_in_V_empty_n;\n assign fifodata_in_V_ready = data_in_V_ready_reg | ready_initial;\n assign fifodata_in_V_done = 0;\n // Assign input of dut\n assign data_in_V_dout = fifodata_in_V_dout;\n reg reg_fifodata_in_V_empty_n;\n initial begin 
: gen_reg_fifodata_in_V_empty_n_process\n integer proc_rand;\n reg_fifodata_in_V_empty_n = fifodata_in_V_empty_n;\n while (1) begin\n @(fifodata_in_V_empty_n);\n reg_fifodata_in_V_empty_n = fifodata_in_V_empty_n;\n end\n end\n\n assign data_in_V_empty_n = reg_fifodata_in_V_empty_n;\n\n\n //------------------------Fifodata_out_V Instantiation--------------\n\n // The input and output of fifodata_out_V\n wire fifodata_out_V_wr;\n wire [OUT_SIZE*OUT_WIDTH-1 : 0] fifodata_out_V_din;\n wire fifodata_out_V_full_n;\n wire fifodata_out_V_ready;\n wire fifodata_out_V_done;\n\n `AESL_FIFO_data_out_V `AESL_FIFO_INST_data_out_V(\n .clk (AESL_clock),\n .reset (AESL_reset),\n .if_write (fifodata_out_V_wr),\n .if_din (fifodata_out_V_din),\n .if_full_n (fifodata_out_V_full_n),\n .if_read (),\n .if_dout (),\n .if_empty_n (),\n .ready (fifodata_out_V_ready),\n .done (fifodata_out_V_done));\n\n // Assignment between dut and fifodata_out_V\n\n // Assign input of fifodata_out_V\n assign fifodata_out_V_wr = data_out_V_write & data_out_V_full_n;\n assign fifodata_out_V_din = data_out_V_din;\n assign fifodata_out_V_ready = 0; //ready_initial | AESL_done_delay;\n assign fifodata_out_V_done = AESL_done_delay;\n // Assign input of dut\n reg reg_fifodata_out_V_full_n;\n initial begin : gen_reg_fifodata_out_V_full_n_process\n integer proc_rand;\n reg_fifodata_out_V_full_n = fifodata_out_V_full_n;\n while (1) begin\n @(fifodata_out_V_full_n);\n reg_fifodata_out_V_full_n = fifodata_out_V_full_n;\n end\n end\n\n assign data_out_V_full_n = reg_fifodata_out_V_full_n;\n\n\n initial begin : generate_AESL_ready_cnt_proc\n AESL_ready_cnt = 0;\n wait (AESL_reset === 0);\n while (AESL_ready_cnt != OUT_TRANSACTION_NUM) begin\n while (AESL_ready !== 1) begin\n @(posedge AESL_clock);\n #0.4;\n end\n @(negedge AESL_clock);\n AESL_ready_cnt = AESL_ready_cnt + 1;\n @(posedge AESL_clock);\n #0.4;\n end\n end\n\n event next_trigger_ready_cnt;\n\n initial begin : gen_ready_cnt\n ready_cnt = 0;\n wait (AESL_reset === 0);\n forever begin\n @(posedge AESL_clock);\n if (ready == 1) begin\n if (ready_cnt < OUT_TRANSACTION_NUM) begin\n ready_cnt = ready_cnt + 1;\n end\n end\n ->next_trigger_ready_cnt;\n end\n end\n\n wire all_finish = (done_cnt == OUT_TRANSACTION_NUM);\n\n // done_cnt\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n done_cnt <= 0;\n end else begin\n if (AESL_done == 1) begin\n if (done_cnt < OUT_TRANSACTION_NUM) begin\n done_cnt <= done_cnt + 1;\n end\n end\n end\n end\n\n initial begin : finish_simulation\n integer fp1;\n integer fp2;\n wait (all_finish == 1);\n // last transaction is saved at negedge right after last done\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n fp1 = $fopen(`SW_DATA_OUT_DAT, \"r\");\n fp2 = $fopen(`HW_DATA_OUT_DAT, \"r\");\n if (fp1 == 0) // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"\", `SW_DATA_OUT_DAT);\n else if (fp2 == 0) $display(\"Failed to open file \\\\\\\"%s\\\\\\\"\", `HW_DATA_OUT_DAT);\n else begin\n $display(\n \"Comparing \\\\\\\"%s\\\\\\\" with \\\\\\\"%s\\\\\\\"\", `SW_DATA_OUT_DAT, `HW_DATA_OUT_DAT);\n post_check(fp1, fp2);\n end\n $fclose(fp1);\n $fclose(fp2);\n $display(\"Simulation PASS.\");\n $finish;\n end\n\n initial begin\n AESL_clock = 0;\n forever #`AUTOTB_CLOCK_PERIOD_DIV2 AESL_clock = ~AESL_clock;\n end\n\n\n reg end_data_in_V;\n reg [31:0] size_data_in_V;\n reg [31:0] size_data_in_V_backup;\n reg end_data_out_V;\n reg [31:0] size_data_out_V;\n reg [31:0] 
size_data_out_V_backup;\n\n initial begin : initial_process\n integer proc_rand;\n rst = 1;\n #100;\n repeat (0 + 3) @(posedge AESL_clock);\n rst = 0;\n end\n initial begin : initial_process_for_dut_rst\n integer proc_rand;\n dut_rst = 1;\n #100;\n repeat (3) @(posedge AESL_clock);\n dut_rst = 0;\n end\n initial begin : start_process\n integer proc_rand;\n reg [31:0] start_cnt;\n ce = 1;\n start = 0;\n start_cnt = 0;\n wait (AESL_reset === 0);\n @(posedge AESL_clock);\n #0 start = 1;\n start_cnt = start_cnt + 1;\n forever begin\n if (start_cnt >= OUT_TRANSACTION_NUM + 1) begin\n #0 start = 0;\n end\n @(posedge AESL_clock);\n if (AESL_ready) begin\n start_cnt = start_cnt + 1;\n end\n end\n end\n\n always @(AESL_done) begin\n tb_continue = AESL_done;\n end\n\n initial begin : ready_initial_process\n ready_initial = 0;\n wait (AESL_start === 1);\n ready_initial = 1;\n @(posedge AESL_clock);\n ready_initial = 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) AESL_ready_delay = 0;\n else AESL_ready_delay = AESL_ready;\n end\n initial begin : ready_last_n_process\n ready_last_n = 1;\n wait (ready_cnt == OUT_TRANSACTION_NUM) @(posedge AESL_clock);\n ready_last_n <= 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) ready_delay_last_n = 0;\n else ready_delay_last_n <= ready_last_n;\n end\n assign ready = (ready_initial | AESL_ready_delay);\n assign ready_wire = ready_initial | AESL_ready_delay;\n initial begin : done_delay_last_n_process\n done_delay_last_n = 1;\n while (done_cnt < OUT_TRANSACTION_NUM) @(posedge AESL_clock);\n #0.1;\n done_delay_last_n = 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n AESL_done_delay <= 0;\n AESL_done_delay2 <= 0;\n end else begin\n AESL_done_delay <= AESL_done & done_delay_last_n;\n AESL_done_delay2 <= AESL_done_delay;\n end\n end\n always @(posedge AESL_clock) begin\n if (AESL_reset) interface_done = 0;\n else begin\n #0.01;\n if (ready === 1 && ready_cnt > 0 && ready_cnt < OUT_TRANSACTION_NUM) interface_done = 1;\n else if (AESL_done_delay === 1 && done_cnt == OUT_TRANSACTION_NUM) interface_done = 1;\n else interface_done = 0;\n end\n end\n initial begin : proc_gen_data_in_V_internal_ready\n integer internal_trans_num;\n wait (AESL_reset === 0);\n wait (ready_initial === 1);\n data_in_V_ready_reg <= 0;\n @(posedge AESL_clock);\n internal_trans_num = 1;\n while (internal_trans_num != IN_TRANSACTION_NUM + 1) begin\n if (ap_c_n_tvin_trans_num_data_in_V > internal_trans_num) begin\n data_in_V_ready_reg <= 1;\n @(posedge AESL_clock);\n data_in_V_ready_reg <= 0;\n internal_trans_num = internal_trans_num + 1;\n end else begin\n @(posedge AESL_clock);\n end\n end\n data_in_V_ready_reg <= 0;\n end\n\n `define STREAM_SIZE_IN_data_in_V \"{hw_stream_size}\"\n\n initial begin : gen_ap_c_n_tvin_trans_num_data_in_V\n integer fp_data_in_V;\n reg [TOKEN_WIDTH-1:0] token_data_in_V;\n integer ret;\n\n ap_c_n_tvin_trans_num_data_in_V = 0;\n end_data_in_V = 0;\n wait (AESL_reset === 0);\n\n fp_data_in_V = $fopen(`STREAM_SIZE_IN_data_in_V, \"r\");\n if (fp_data_in_V == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `STREAM_SIZE_IN_data_in_V);\n $finish;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[[runtime]]]\n if (token_data_in_V != \"[[[runtime]]]\") begin\n $display(\"ERROR: token_data_in_V != \\\\\\\"[[[runtime]]]\\\\\\\"\");\n $finish;\n end\n size_data_in_V = 0;\n size_data_in_V_backup = 0;\n while (size_data_in_V == 0 && end_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = 
ap_c_n_tvin_trans_num_data_in_V + 1;\n read_token(fp_data_in_V, token_data_in_V); // should be [[transaction]] or [[[/runtime]]]\n if (token_data_in_V == \"[[transaction]]\") begin\n read_token(fp_data_in_V, token_data_in_V); // should be transaction number\n read_token(fp_data_in_V, token_data_in_V); // should be size for hls::stream\n ret = $sscanf(token_data_in_V, \"%d\", size_data_in_V);\n if (size_data_in_V > 0) begin\n size_data_in_V_backup = size_data_in_V;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[/transaction]]\n end else if (token_data_in_V == \"[[[/runtime]]]\") begin\n $fclose(fp_data_in_V);\n end_data_in_V = 1;\n end else begin\n $display(\"ERROR: unknown token_data_in_V\");\n $finish;\n end\n end\n forever begin\n @(posedge AESL_clock);\n if (end_data_in_V == 0) begin\n if (data_in_V_read == 1 && data_in_V_empty_n == 1) begin\n if (size_data_in_V > 0) begin\n size_data_in_V = size_data_in_V - 1;\n while (size_data_in_V == 0 && end_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = ap_c_n_tvin_trans_num_data_in_V + 1;\n read_token(fp_data_in_V,\n token_data_in_V); // should be [[transaction]] or [[[/runtime]]]\n if (token_data_in_V == \"[[transaction]]\") begin\n read_token(fp_data_in_V, token_data_in_V); // should be transaction number\n read_token(fp_data_in_V, token_data_in_V); // should be size for hls::stream\n ret = $sscanf(token_data_in_V, \"%d\", size_data_in_V);\n if (size_data_in_V > 0) begin\n size_data_in_V_backup = size_data_in_V;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[/transaction]]\n end else if (token_data_in_V == \"[[[/runtime]]]\") begin\n size_data_in_V = size_data_in_V_backup;\n $fclose(fp_data_in_V);\n end_data_in_V = 1;\n end else begin\n $display(\"ERROR: unknown token_data_in_V\");\n $finish;\n end\n end\n end\n end\n end else begin\n if (data_in_V_read == 1 && data_in_V_empty_n == 1) begin\n if (size_data_in_V > 0) begin\n size_data_in_V = size_data_in_V - 1;\n if (size_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = ap_c_n_tvin_trans_num_data_in_V + 1;\n size_data_in_V = size_data_in_V_backup;\n end\n end\n end\n end\n end\n end\n\n\n reg dump_tvout_finish_data_out_V;\n\n initial begin : dump_tvout_runtime_sign_data_out_V\n integer fp;\n dump_tvout_finish_data_out_V = 0;\n fp = $fopen(`HW_DATA_OUT_DAT, \"w\");\n if (fp == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `HW_DATA_OUT_DAT);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n $fdisplay(fp, \"[[[runtime]]]\");\n $fclose(fp);\n wait (done_cnt == OUT_TRANSACTION_NUM);\n // last transaction is saved at negedge right after last done\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n fp = $fopen(`HW_DATA_OUT_DAT, \"a\");\n if (fp == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `HW_DATA_OUT_DAT);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n $fdisplay(fp, \"[[[/runtime]]]\");\n $fclose(fp);\n dump_tvout_finish_data_out_V = 1;\n end\n\n\n ////////////////////////////////////////////\n // progress and performance\n ////////////////////////////////////////////\n\n task wait_start();\n while (~AESL_start) begin\n @(posedge AESL_clock);\n end\n endtask\n\n reg [31:0] clk_cnt = 0;\n reg AESL_ready_p1;\n reg AESL_start_p1;\n\n always @(posedge AESL_clock) begin\n if (AESL_reset == 1) begin\n clk_cnt <= 32'h0;\n AESL_ready_p1 <= 1'b0;\n AESL_start_p1 <= 1'b0;\n end else begin\n clk_cnt <= clk_cnt + 1;\n AESL_ready_p1 <= 
AESL_ready;\n AESL_start_p1 <= AESL_start;\n end\n end\n\n reg [31:0] start_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] start_cnt;\n reg [31:0] ready_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] ap_ready_cnt;\n reg [31:0] finish_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] finish_cnt;\n reg [31:0] lat_total;\n event report_progress;\n\n always @(posedge AESL_clock) begin\n if (finish_cnt == OUT_TRANSACTION_NUM - 1 && AESL_done == 1'b1)\n lat_total = clk_cnt - start_timestamp[0];\n end\n\n initial begin\n start_cnt = 0;\n finish_cnt = 0;\n ap_ready_cnt = 0;\n wait (AESL_reset == 0);\n wait_start();\n start_timestamp[start_cnt] = clk_cnt;\n start_cnt = start_cnt + 1;\n if (AESL_done) begin\n finish_timestamp[finish_cnt] = clk_cnt;\n finish_cnt = finish_cnt + 1;\n end\n ->report_progress;\n forever begin\n @(posedge AESL_clock);\n if (start_cnt < OUT_TRANSACTION_NUM) begin\n if ((AESL_start && AESL_ready_p1) || (AESL_start && ~AESL_start_p1)) begin\n start_timestamp[start_cnt] = clk_cnt;\n start_cnt = start_cnt + 1;\n end\n end\n if (ap_ready_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_start_p1 && AESL_ready_p1) begin\n ready_timestamp[ap_ready_cnt] = clk_cnt;\n ap_ready_cnt = ap_ready_cnt + 1;\n end\n end\n if (finish_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_done) begin\n finish_timestamp[finish_cnt] = clk_cnt;\n finish_cnt = finish_cnt + 1;\n end\n end\n ->report_progress;\n end\n end\n\n reg [31:0] progress_timeout;\n\n initial begin : simulation_progress\n real intra_progress;\n wait (AESL_reset == 0);\n progress_timeout = PROGRESS_TIMEOUT;\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n $display(\"// Inter-Transaction Progress: Completed Transaction / Total Transaction\");\n $display(\"// Intra-Transaction Progress: Measured Latency / Latency Estimation * 100%%\");\n $display(\"//\");\n $display(\n \"// RTL Simulation : \\\\\\\"Inter-Transaction Progress\\\\\\\" [\\\\\\\"Intra-Transaction Progress\\\\\\\"] @ \\\\\\\"Simulation Time\\\\\\\"\");\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n print_progress();\n while (finish_cnt < OUT_TRANSACTION_NUM) begin\n @(report_progress);\n if (finish_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_done) begin\n print_progress();\n progress_timeout = PROGRESS_TIMEOUT;\n end else begin\n if (progress_timeout == 0) begin\n print_progress();\n progress_timeout = PROGRESS_TIMEOUT;\n end else begin\n progress_timeout = progress_timeout - 1;\n end\n end\n end\n end\n print_progress();\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n calculate_performance();\n end\n\n task get_intra_progress(output real intra_progress);\n begin\n if (start_cnt > finish_cnt) begin\n intra_progress = clk_cnt - start_timestamp[finish_cnt];\n end else if (finish_cnt > 0) begin\n intra_progress = LATENCY_ESTIMATION;\n end else begin\n intra_progress = 0;\n end\n intra_progress = intra_progress / LATENCY_ESTIMATION;\n end\n endtask\n\n task print_progress();\n real intra_progress;\n begin\n if (LATENCY_ESTIMATION > 0) begin\n get_intra_progress(intra_progress);\n $display(\"// RTL Simulation : %0d / %0d [%2.2f%%] @ \\\\\\\"%0t\\\\\\\"\", finish_cnt,\n OUT_TRANSACTION_NUM, intra_progress * 100, $time);\n end else begin\n $display(\"// RTL Simulation : %0d / %0d [n/a] @ \\\\\\\"%0t\\\\\\\"\", finish_cnt,\n OUT_TRANSACTION_NUM, $time);\n end\n end\n endtask\n\n task 
calculate_performance();\n integer i;\n integer fp;\n reg [31:0] latency[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] latency_min;\n reg [31:0] latency_max;\n reg [31:0] latency_total;\n reg [31:0] latency_average;\n reg [31:0] interval[0:OUT_TRANSACTION_NUM - 2];\n reg [31:0] interval_min;\n reg [31:0] interval_max;\n reg [31:0] interval_total;\n reg [31:0] interval_average;\n reg [31:0] total_execute_time;\n begin\n latency_min = -1;\n latency_max = 0;\n latency_total = 0;\n interval_min = -1;\n interval_max = 0;\n interval_total = 0;\n total_execute_time = lat_total;\n\n for (i = 0; i < OUT_TRANSACTION_NUM; i = i + 1) begin\n // calculate latency\n latency[i] = finish_timestamp[i] - start_timestamp[i];\n if (latency[i] > latency_max) latency_max = latency[i];\n if (latency[i] < latency_min) latency_min = latency[i];\n latency_total = latency_total + latency[i];\n // calculate interval\n if (OUT_TRANSACTION_NUM == 1) begin\n interval[i] = 0;\n interval_max = 0;\n interval_min = 0;\n interval_total = 0;\n end else if (i < OUT_TRANSACTION_NUM - 1) begin\n interval[i] = start_timestamp[i+1] - start_timestamp[i];\n if (interval[i] > interval_max) interval_max = interval[i];\n if (interval[i] < interval_min) interval_min = interval[i];\n interval_total = interval_total + interval[i];\n end\n end\n\n latency_average = latency_total / OUT_TRANSACTION_NUM;\n if (OUT_TRANSACTION_NUM == 1) begin\n interval_average = 0;\n end else begin\n interval_average = interval_total / (OUT_TRANSACTION_NUM - 1);\n end\n\n fp = $fopen(`AUTOTB_LAT_RESULT_FILE, \"w\");\n\n $fdisplay(fp, \"$MAX_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_max);\n $fdisplay(fp, \"$MIN_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_min);\n $fdisplay(fp, \"$AVER_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_average);\n $fdisplay(fp, \"$MAX_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_max);\n $fdisplay(fp, \"$MIN_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_min);\n $fdisplay(fp, \"$AVER_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_average);\n $fdisplay(fp, \"$TOTAL_EXECUTE_TIME = \\\\\\\"%0d\\\\\\\"\", total_execute_time);\n\n $fclose(fp);\n\n fp = $fopen(`AUTOTB_PER_RESULT_TRANS_FILE, \"w\");\n\n $fdisplay(fp, \"%20s%16s%16s\", \"\", \"latency\", \"interval\");\n if (OUT_TRANSACTION_NUM == 1) begin\n i = 0;\n $fdisplay(fp, \"transaction%8d:%16d%16d\", i, latency[i], interval[i]);\n end else begin\n for (i = 0; i < OUT_TRANSACTION_NUM; i = i + 1) begin\n if (i < OUT_TRANSACTION_NUM - 1) begin\n $fdisplay(fp, \"transaction%8d:%16d%16d\", i, latency[i], interval[i]);\n end else begin\n $fdisplay(fp, \"transaction%8d:%16d x\", i, latency[i]);\n end\n end\n end\n\n $fclose(fp);\n end\n endtask\n\n\n ////////////////////////////////////////////\n // Dependence Check\n ////////////////////////////////////////////\n\n`ifndef POST_SYN\n\n`endif\n ///////////////////////////////////////////////////////\n // dataflow status monitor\n ///////////////////////////////////////////////////////\n // dataflow_monitor U_dataflow_monitor (\n // .clock (AESL_clock),\n // .reset (rst),\n // .finish(all_finish)\n // );\n\n // `include \"fifo_para.v\"\n\nendmodule\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Top-level test bench emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting top-level test bench failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" } ]
import math, time, os, logging, torch, glob, shutil
from chop.passes.graph.utils import vf, v2p, init_project
from chop.passes.graph.transforms.quantize.quantizers import integer_quantizer_for_hw
from .emit_tb_data_in import emit_data_in_tb_sv, emit_data_in_tb_dat
from .emit_tb_data_out import emit_data_out_tb_sv, emit_data_out_tb_dat
from .emit_tb_testbench import emit_top_tb
from pathlib import Path
12,382
logger = logging.getLogger(__name__)


def emit_tb_verilog(graph, trans_num=1, project_dir="top"):
    sim_dir = os.path.join(project_dir, "hardware", "sim")
    tv_dir = os.path.join(sim_dir, "tv")
    if not os.path.exists(tv_dir):
        os.mkdir(tv_dir)
    v_dir = os.path.join(sim_dir, "verilog")
    if not os.path.exists(v_dir):
        os.mkdir(v_dir)

    # TODO : need to emit all the inputs
    v_in_param = graph.nodes_in[0].meta["mase"].parameters["hardware"]["verilog_param"]
    w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"]
    in_width = w_in_param["data_in_0"]["precision"][0]
    in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"]
    data_width = in_width * in_size
    # TODO : need to check
    addr_width = 1
    depth = 1
    load_path = os.path.join(tv_dir, f"sw_data_in.dat")
    out_file = os.path.join(v_dir, f"top_data_in_fifo.sv")
    emit_data_in_tb_sv(data_width, load_path, out_file)

    v_out_param = (
        graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"]
    )
    w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"]
    out_width = w_out_param["data_out_0"]["precision"][0]
    out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"]
    data_width = out_width * out_size
    # TODO : need to check
    addr_width = 1
    depth = 1
    load_path = os.path.join(tv_dir, f"sw_data_out.dat")
    store_path = os.path.join(tv_dir, f"hw_data_out.dat")
    out_file = os.path.join(v_dir, f"top_data_out_fifo.sv")
logger = logging.getLogger(__name__)


def emit_tb_verilog(graph, trans_num=1, project_dir="top"):
    sim_dir = os.path.join(project_dir, "hardware", "sim")
    tv_dir = os.path.join(sim_dir, "tv")
    if not os.path.exists(tv_dir):
        os.mkdir(tv_dir)
    v_dir = os.path.join(sim_dir, "verilog")
    if not os.path.exists(v_dir):
        os.mkdir(v_dir)

    # TODO : need to emit all the inputs
    v_in_param = graph.nodes_in[0].meta["mase"].parameters["hardware"]["verilog_param"]
    w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"]
    in_width = w_in_param["data_in_0"]["precision"][0]
    in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"]
    data_width = in_width * in_size
    # TODO : need to check
    addr_width = 1
    depth = 1
    load_path = os.path.join(tv_dir, f"sw_data_in.dat")
    out_file = os.path.join(v_dir, f"top_data_in_fifo.sv")
    emit_data_in_tb_sv(data_width, load_path, out_file)

    v_out_param = (
        graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"]
    )
    w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"]
    out_width = w_out_param["data_out_0"]["precision"][0]
    out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"]
    data_width = out_width * out_size
    # TODO : need to check
    addr_width = 1
    depth = 1
    load_path = os.path.join(tv_dir, f"sw_data_out.dat")
    store_path = os.path.join(tv_dir, f"hw_data_out.dat")
    out_file = os.path.join(v_dir, f"top_data_out_fifo.sv")
emit_data_out_tb_sv(data_width, load_path, store_path, out_file)
2
2023-12-18 12:50:53+00:00
16k
OPPOMKLab/u-LLaVA
models/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "models/GroundingDINO/groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "get_tokenlizer", "path": "models/GroundingDINO/groundingdino/util/get_tokenlizer.py", "snippet": "def get_tokenlizer(text_encoder_type):\n if not isinstance(text_encoder_type, str):\n # print(\"text_encoder_type is not a str\")\n if hasattr(text_encoder_type, \"text_encoder_type\"):\n text_encoder_type = text_encoder_type.text_encoder_type\n elif text_encoder_type.get(\"text_encoder_type\", False):\n text_encoder_type = text_encoder_type.get(\"text_encoder_type\")\n elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):\n pass\n else:\n raise ValueError(\n \"Unknown type of text_encoder_type: {}\".format(type(text_encoder_type))\n )\n print(\"final text_encoder_type: {}\".format(text_encoder_type))\n\n tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)\n return tokenizer" }, { "identifier": "NestedTensor", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == \"auto\":\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\n \"tensors dim must be 3 or 4 but {}({})\".format(\n self.tensors.dim(), self.tensors.shape\n )\n )\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\"tensors.shape\": self.tensors.shape, \"mask.shape\": self.mask.shape}" }, { "identifier": "accuracy", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = 
output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "inverse_sigmoid", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)" }, { "identifier": "is_dist_avail_and_initialized", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "nested_tensor_from_tensor_list", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)" }, { "identifier": "get_phrases_from_posmap", "path": "models/GroundingDINO/groundingdino/util/utils.py", "snippet": "def get_phrases_from_posmap(\n posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255\n):\n assert isinstance(posmap, torch.Tensor), \"posmap must be torch.Tensor\"\n if posmap.dim() == 1:\n posmap[0: left_idx + 1] = False\n posmap[right_idx:] = False\n non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n 
token_ids = [tokenized[\"input_ids\"][i] for i in non_zero_idx]\n return tokenizer.decode(token_ids)\n else:\n raise NotImplementedError(\"posmap must be 1-dim\")" }, { "identifier": "COCOVisualizer", "path": "models/GroundingDINO/groundingdino/util/visualizer.py", "snippet": "class COCOVisualizer:\n def __init__(self, coco=None, tokenlizer=None) -> None:\n self.coco = coco\n\n def visualize(self, img, tgt, caption=None, dpi=180, savedir=\"vis\"):\n \"\"\"\n img: tensor(3, H, W)\n tgt: make sure they are all on cpu.\n must have items: 'image_id', 'boxes', 'size'\n \"\"\"\n plt.figure(dpi=dpi)\n plt.rcParams[\"font.size\"] = \"5\"\n ax = plt.gca()\n img = renorm(img).permute(1, 2, 0)\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n ax.imshow(img)\n\n self.addtgt(tgt)\n\n if tgt is None:\n image_id = 0\n elif \"image_id\" not in tgt:\n image_id = 0\n else:\n image_id = tgt[\"image_id\"]\n\n if caption is None:\n savename = \"{}/{}-{}.png\".format(\n savedir, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n else:\n savename = \"{}/{}-{}-{}.png\".format(\n savedir, caption, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n print(\"savename: {}\".format(savename))\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(savename)\n plt.close()\n\n def addtgt(self, tgt):\n \"\"\" \"\"\"\n if tgt is None or not \"boxes\" in tgt:\n ax = plt.gca()\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n\n ax.set_axis_off()\n return\n\n ax = plt.gca()\n H, W = tgt[\"size\"]\n numbox = tgt[\"boxes\"].shape[0]\n\n color = []\n polygons = []\n boxes = []\n for box in tgt[\"boxes\"].cpu():\n unnormbbox = box * torch.Tensor([W, H, W, H])\n unnormbbox[:2] -= unnormbbox[2:] / 2\n [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n color.append(c)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if \"strings_positive\" in tgt and len(tgt[\"strings_positive\"]) > 0:\n assert (\n len(tgt[\"strings_positive\"]) == numbox\n ), f\"{len(tgt['strings_positive'])} = {numbox}, \"\n for idx, strlist in enumerate(tgt[\"strings_positive\"]):\n cate_id = int(tgt[\"labels\"][idx])\n _string = str(cate_id) + \":\" + \" \".join(strlist)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"box_label\" in tgt:\n assert len(tgt[\"box_label\"]) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n for idx, bl in enumerate(tgt[\"box_label\"]):\n _string = str(bl)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], 
wrap=True)\n # plt.figure()\n # rainbow_text(0.0,0.0,\"all unicorns poop rainbows ! ! !\".split(),\n # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])\n\n if \"attn\" in tgt:\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n if isinstance(tgt[\"attn\"], tuple):\n tgt[\"attn\"] = [tgt[\"attn\"]]\n for item in tgt[\"attn\"]:\n attn_map, basergb = item\n attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)\n attn_map = (attn_map * 255).astype(np.uint8)\n cm = ColorMap(basergb)\n heatmap = cm(attn_map)\n ax.imshow(heatmap)\n ax.set_axis_off()\n\n def showAnns(self, anns, draw_bbox=False):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if \"segmentation\" in anns[0] or \"keypoints\" in anns[0]:\n datasetType = \"instances\"\n elif \"caption\" in anns[0]:\n datasetType = \"captions\"\n else:\n raise Exception(\"datasetType not supported\")\n if datasetType == \"instances\":\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if \"segmentation\" in ann:\n if type(ann[\"segmentation\"]) == list:\n # polygon\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((int(len(seg) / 2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann[\"image_id\"]]\n if type(ann[\"segmentation\"][\"counts\"]) == list:\n rle = maskUtils.frPyObjects(\n [ann[\"segmentation\"]], t[\"height\"], t[\"width\"]\n )\n else:\n rle = [ann[\"segmentation\"]]\n m = maskUtils.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n if ann[\"iscrowd\"] == 1:\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n if ann[\"iscrowd\"] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n if \"keypoints\" in ann and type(ann[\"keypoints\"]) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann[\"category_id\"])[0][\"skeleton\"]) - 1\n kp = np.array(ann[\"keypoints\"])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk] > 0):\n plt.plot(x[sk], y[sk], linewidth=3, color=c)\n plt.plot(\n x[v > 0],\n y[v > 0],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=\"k\",\n markeredgewidth=2,\n )\n plt.plot(\n x[v > 1],\n y[v > 1],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=c,\n markeredgewidth=2,\n )\n\n if draw_bbox:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann[\"bbox\"]\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n # ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == \"captions\":\n for ann in anns:\n print(ann[\"caption\"])" }, { "identifier": "create_positive_map_from_span", "path": "models/GroundingDINO/groundingdino/util/vl_utils.py", "snippet": "def create_positive_map_from_span(tokenized, token_span, max_text_len=256):\n \"\"\"construct a map such that positive_map[i,j] = True iff box i is associated to token j\n Input:\n - tokenized:\n - 
input_ids: Tensor[1, ntokens]\n - attention_mask: Tensor[1, ntokens]\n - token_span: list with length num_boxes.\n - each item: [start_idx, end_idx]\n \"\"\"\n positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)\n for j, tok_list in enumerate(token_span):\n for (beg, end) in tok_list:\n beg_pos = tokenized.char_to_token(beg)\n end_pos = tokenized.char_to_token(end - 1)\n if beg_pos is None:\n try:\n beg_pos = tokenized.char_to_token(beg + 1)\n if beg_pos is None:\n beg_pos = tokenized.char_to_token(beg + 2)\n except:\n beg_pos = None\n if end_pos is None:\n try:\n end_pos = tokenized.char_to_token(end - 2)\n if end_pos is None:\n end_pos = tokenized.char_to_token(end - 3)\n except:\n end_pos = None\n if beg_pos is None or end_pos is None:\n continue\n\n assert beg_pos is not None and end_pos is not None\n if os.environ.get(\"SHILONG_DEBUG_ONLY_ONE_POS\", None) == \"TRUE\":\n positive_map[j, beg_pos] = 1\n break\n else:\n positive_map[j, beg_pos : end_pos + 1].fill_(1)\n\n return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/GroundingDINO/groundingdino/models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry(\"model build functions\")" }, { "identifier": "build_backbone", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone:\n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords:\n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = True\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]\n args.backbone_freeze_keywords\n use_checkpoint = getattr(args, \"use_checkpoint\", False)\n\n if args.backbone in [\"resnet50\", \"resnet101\"]:\n backbone = Backbone(\n args.backbone,\n train_backbone,\n args.dilation,\n return_interm_indices,\n batch_norm=FrozenBatchNorm2d,\n )\n bb_num_channels = backbone.num_channels\n elif args.backbone in [\n \"swin_T_224_1k\",\n \"swin_B_224_22k\",\n \"swin_B_384_22k\",\n \"swin_L_224_22k\",\n \"swin_L_384_22k\",\n ]:\n pretrain_img_size = int(args.backbone.split(\"_\")[-2])\n backbone = build_swin_transformer(\n args.backbone,\n pretrain_img_size=pretrain_img_size,\n out_indices=tuple(return_interm_indices),\n dilation=False,\n use_checkpoint=use_checkpoint,\n )\n\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n\n assert len(bb_num_channels) == len(\n return_interm_indices\n ), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels\n assert isinstance(\n bb_num_channels, List\n ), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n # import ipdb; ipdb.set_trace()\n return model" }, { "identifier": "BertModelWarper", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "class BertModelWarper(nn.Module):\n def __init__(self, bert_model):\n super().__init__()\n # self.bert = bert_modelc\n\n self.config = bert_model.config\n self.embeddings = 
bert_model.embeddings\n self.encoder = bert_model.encoder\n self.pooler = bert_model.pooler\n\n self.get_extended_attention_mask = bert_model.get_extended_attention_mask\n self.invert_attention_mask = bert_model.invert_attention_mask\n self.get_head_mask = bert_model.get_head_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(\n ((batch_size, seq_length + past_key_values_length)), device=device\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions 
[batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )" }, { "identifier": "generate_masks_with_special_tokens", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n\n previous_col = col\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long)" }, { "identifier": "generate_masks_with_special_tokens_and_transfer_map", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n cate_to_token_mask_list = [[] for _ in range(bs)]\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()\n c2t_maski[previous_col + 1 : col] = True\n cate_to_token_mask_list[row].append(c2t_maski)\n previous_col = col\n\n cate_to_token_mask_list = [\n torch.stack(cate_to_token_mask_listi, dim=0)\n for cate_to_token_mask_listi in cate_to_token_mask_list\n ]\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list" }, { "identifier": "build_transformer", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n learnable_tgt_init=True,\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n embed_init_tgt=args.embed_init_tgt,\n use_text_enhancer=args.use_text_enhancer,\n use_fusion_layer=args.use_fusion_layer,\n use_checkpoint=args.use_checkpoint,\n use_transformer_ckpt=args.use_transformer_ckpt,\n use_text_cross_attention=args.use_text_cross_attention,\n text_dropout=args.text_dropout,\n fusion_dropout=args.fusion_dropout,\n fusion_droppath=args.fusion_droppath,\n )" }, { "identifier": "MLP", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n )\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "ContrastiveEmbed", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "class ContrastiveEmbed(nn.Module):\n def __init__(self, 
max_text_len=256):\n \"\"\"\n Args:\n max_text_len: max length of text.\n \"\"\"\n super().__init__()\n self.max_text_len = max_text_len\n\n def forward(self, x, text_dict):\n \"\"\"_summary_\n\n Args:\n x (_type_): _description_\n text_dict (_type_): _description_\n {\n 'encoded_text': encoded_text, # bs, 195, d_model\n 'text_token_mask': text_token_mask, # bs, 195\n # True for used tokens. False for padding tokens\n }\n Returns:\n _type_: _description_\n \"\"\"\n assert isinstance(text_dict, dict)\n\n y = text_dict[\"encoded_text\"]\n text_token_mask = text_dict[\"text_token_mask\"]\n\n res = x @ y.transpose(-1, -2)\n res.masked_fill_(~text_token_mask[:, None, :], float(\"-inf\"))\n\n # padding to max_text_len\n new_res = torch.full((*res.shape[:-1], self.max_text_len), float(\"-inf\"), device=res.device)\n new_res[..., : res.shape[-1]] = res\n\n return new_res" }, { "identifier": "sigmoid_focal_loss", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "def sigmoid_focal_loss(\n inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False\n):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if no_reduction:\n return loss\n\n return loss.mean(1).sum() / num_boxes" } ]
import copy
import torch
import torch.nn.functional as F
from typing import List
from torch import nn
from torchvision.ops.boxes import nms
from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
from models.GroundingDINO.groundingdino.util import box_ops, get_tokenlizer
from models.GroundingDINO.groundingdino.util.misc import (
    NestedTensor,
    accuracy,
    get_world_size,
    interpolate,
    inverse_sigmoid,
    is_dist_avail_and_initialized,
    nested_tensor_from_tensor_list,
)
from models.GroundingDINO.groundingdino.util.utils import get_phrases_from_posmap
from models.GroundingDINO.groundingdino.util.visualizer import COCOVisualizer
from models.GroundingDINO.groundingdino.util.vl_utils import create_positive_map_from_span
from ..registry import MODULE_BUILD_FUNCS
from .backbone import build_backbone
from .bertwarper import (
    BertModelWarper,
    generate_masks_with_special_tokens,
    generate_masks_with_special_tokens_and_transfer_map,
)
from .transformer import build_transformer
from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss
10,986
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" # prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" # prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed
_class_embed = ContrastiveEmbed()
19
2023-12-21 08:10:23+00:00
16k
chinhsuanwu/ifusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config, **kwargs):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**kwargs, **config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, 
**kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = 
lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n # for i, step in enumerate(iterator):\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, 
i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), 
desc=\"Encoding Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities import rank_zero_only from omegaconf import ListConfig from ldm.util import ( log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config, ) from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) from ldm.models.autoencoder import ( VQModelInterface, IdentityFirstStage, AutoencoderKL, ) from ldm.modules.diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, ) from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
12105
log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(self.first_stage_model, IdentityFirstStage) ): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, quantize_denoised=True, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning( N, unconditional_guidance_label, image_size=x.shape[-1] ) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[ f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" ] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 mask = mask[:, None, ...] with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1.0 - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising( c, shape=(self.channels, self.image_size, self.image_size), batch_size=N, ) prog_row = self._get_denoise_row_from_list( progressives, desc="Progressive Generation" ) log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image_target", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
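        Under the registered schedule this is q(x_t | x_0) = N(sqrt(alphas_cumprod_t) * x_0, (1 - alphas_cumprod_t) * I).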
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") elif self.loss_type == "smooth_l1": if mean: loss = torch.nn.functional.smooth_l1_loss(target, pred) else: loss = torch.nn.functional.smooth_l1_loss( target, pred, reduction="none" ) else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def 
p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) 
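                # q_sample above draws x_t in closed form:
                # x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise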
diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting 
self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = 
self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
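        # prompt_mask marks samples whose CLIP/pose embedding is swapped for the null prompt
        # (random < 2 * uncond); input_mask zeroes the concatenated image latent where
        # uncond <= random < 3 * uncond, so "text only", "image only" and "both" each cover ~uncond of the batch.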
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, step_ratio=None, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, step_ratio=step_ratio) return loss def forward(self, x, c, step_ratio=None, *args, **kwargs): if step_ratio is not None: t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1) t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device) else: t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) if self.logvar.device != self.device: self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, 
corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: 
timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(self.first_stage_model, IdentityFirstStage) ): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, quantize_denoised=True, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning( N, unconditional_guidance_label, image_size=x.shape[-1] ) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log( cond=c, batch_size=N, 
ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[ f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" ] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 mask = mask[:, None, ...] with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1.0 - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising( c, shape=(self.channels, self.image_size, self.image_size), batch_size=N, ) prog_row = self._get_denoise_row_from_list( progressives, desc="Progressive Generation" ) log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
if isinstance(m, CrossAttention) and n.endswith("attn2"):
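The configure_optimizers fragment above, together with its completion line, trains only the U-Net cross-attention layers by walking named_modules() and keeping modules whose qualified name ends in "attn2". Below is a minimal, self-contained sketch of that parameter-selection pattern; the DummyBlock model and the name-only filter are illustrative stand-ins, not the repository's classes.

```python
# Sketch only: select parameters of submodules whose name ends with "attn2",
# mirroring the `unet_trainable == "attn"` branch above. DummyBlock is a
# stand-in (no forward needed, since only parameter selection is shown).
import torch
import torch.nn as nn

class DummyBlock(nn.Module):
    def __init__(self, dim=8):
        super().__init__()
        self.attn1 = nn.Linear(dim, dim)  # self-attention stand-in
        self.attn2 = nn.Linear(dim, dim)  # cross-attention stand-in
        self.ff = nn.Linear(dim, dim)

model = nn.Sequential(DummyBlock(), DummyBlock())

params = []
for name, module in model.named_modules():
    if name.endswith("attn2"):
        params.extend(module.parameters())

optimizer = torch.optim.AdamW(params, lr=1e-4)
print(sum(p.numel() for p in params), "parameters selected for training")
```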
18
2023-12-17 12:45:38+00:00
16k
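For reference, the sampling step used by p_sample in the record above reduces to adding scaled Gaussian noise to the predicted posterior mean, with the noise masked out at t == 0. A minimal sketch of just that step follows; shapes and values are illustrative, and this is not the repository's code.

```python
# Ancestral sampling step: x_{t-1} = mean + mask * exp(0.5 * log_var) * noise,
# where the mask zeroes the noise on the final step (t == 0).
import torch

def ddpm_step(model_mean: torch.Tensor,
              model_log_variance: torch.Tensor,
              t: torch.Tensor) -> torch.Tensor:
    b = model_mean.shape[0]
    noise = torch.randn_like(model_mean)
    # no noise is added when t == 0
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

x = ddpm_step(torch.zeros(2, 3, 8, 8),
              torch.full((2, 3, 8, 8), -2.0),
              t=torch.tensor([5, 0]))
print(x.shape)  # torch.Size([2, 3, 8, 8])
```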
wangzhecheng/SkyScript
customized_train_and_test.py
[ { "identifier": "create_model_and_transforms", "path": "src/open_clip/factory.py", "snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_patch_dropout: Optional[float] = None,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n pretrained_image: bool = False,\n pretrained_hf: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n cache_dir: Optional[str] = None,\n output_dict: Optional[bool] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_patch_dropout=force_patch_dropout,\n force_image_size=force_image_size,\n pretrained_image=pretrained_image,\n pretrained_hf=pretrained_hf,\n cache_dir=cache_dir,\n output_dict=output_dict,\n )\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess_train = image_transform(\n model.visual.image_size,\n is_train=True,\n mean=image_mean,\n std=image_std,\n aug_cfg=aug_cfg,\n )\n preprocess_val = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess_train, preprocess_val" }, { "identifier": "get_tokenizer", "path": "src/open_clip/factory.py", "snippet": "def get_tokenizer(model_name):\n if model_name.startswith(HF_HUB_PREFIX):\n tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])\n else:\n config = get_model_config(model_name)\n tokenizer = HFTokenizer(\n config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize\n return tokenizer" }, { "identifier": "create_loss", "path": "src/open_clip/factory.py", "snippet": "def create_loss(args):\n if args.distill:\n return DistillClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n elif \"coca\" in args.model.lower():\n return CoCaLoss(\n caption_loss_weight=args.coca_caption_loss_weight,\n clip_loss_weight=args.coca_contrastive_loss_weight,\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n return ClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )" }, { "identifier": "trace_model", "path": "src/open_clip/model.py", "snippet": "def trace_model(model, batch_size=256, device=torch.device('cpu')):\n model.eval()\n image_size = model.visual.image_size\n example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)\n example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)\n model = torch.jit.trace_module(\n model,\n inputs=dict(\n forward=(example_images, example_text),\n encode_text=(example_text,),\n encode_image=(example_images,)\n ))\n model.visual.image_size = image_size\n return model" }, { "identifier": "get_data", "path": "src/training/data.py", "snippet": 
"def get_data(args, preprocess_fns, epoch=0, tokenizer=None):\n preprocess_train, preprocess_val = preprocess_fns\n data = {}\n\n if args.train_data or args.dataset_type == \"synthetic\":\n data[\"train\"] = get_dataset_fn(args.train_data, args.dataset_type)(\n args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)\n\n if args.val_data:\n data[\"val\"] = get_dataset_fn(args.val_data, args.dataset_type)(\n args, preprocess_val, is_train=False, tokenizer=tokenizer)\n\n if args.imagenet_val is not None:\n data[\"imagenet-val\"] = get_imagenet(args, preprocess_fns, \"val\")\n\n if args.imagenet_v2 is not None:\n data[\"imagenet-v2\"] = get_imagenet(args, preprocess_fns, \"v2\")\n\n return data" }, { "identifier": "is_master", "path": "src/training/distributed.py", "snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)" }, { "identifier": "init_distributed_device", "path": "src/training/distributed.py", "snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n elif is_using_distributed():\n if 'SLURM_PROCID' in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url)\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = 'cuda:%d' % args.local_rank\n else:\n device = 'cuda:0'\n torch.cuda.set_device(device)\n else:\n device = 'cpu'\n args.device = device\n device = torch.device(device)\n return device" }, { "identifier": "broadcast_object", "path": "src/training/distributed.py", "snippet": "def broadcast_object(args, obj, src=0):\n # broadcast a pickle-able python object from rank-0 to all ranks\n if args.horovod:\n return hvd.broadcast_object(obj, root_rank=src)\n else:\n if args.rank == src:\n objects = [obj]\n else:\n objects = [None]\n dist.broadcast_object_list(objects, src=src)\n return objects[0]" }, { "identifier": "setup_logging", "path": "src/training/logger.py", "snippet": "def setup_logging(log_file, level, include_host=False):\n if include_host:\n import socket\n hostname = socket.gethostname()\n formatter = logging.Formatter(\n f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n else:\n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', 
datefmt='%Y-%m-%d,%H:%M:%S')\n\n logging.root.setLevel(level)\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n for logger in loggers:\n logger.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logging.root.addHandler(stream_handler)\n\n if log_file:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(formatter)\n logging.root.addHandler(file_handler)" }, { "identifier": "cosine_lr", "path": "src/training/scheduler.py", "snippet": "def cosine_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n e = step - warmup_length\n es = steps - warmup_length\n lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr", "path": "src/training/scheduler.py", "snippet": "def const_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n lr = base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr_cooldown", "path": "src/training/scheduler.py", "snippet": "def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):\n def _lr_adjuster(step):\n start_cooldown_step = steps - cooldown_steps\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n if step < start_cooldown_step:\n lr = base_lr\n else:\n e = step - start_cooldown_step\n es = steps - start_cooldown_step\n # linear decay if power == 1; polynomial decay otherwise;\n decay = (1 - (e/es)) ** cooldown_power\n lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "train_one_epoch", "path": "src/training/train.py", "snippet": "def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=None):\n device = torch.device(args.device)\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n\n model.train()\n if args.distill:\n dist_model.eval()\n\n data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch\n dataloader = data['train'].dataloader\n num_batches_per_epoch = dataloader.num_batches // args.accum_freq\n sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))\n\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n losses_m = {}\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n for i, batch in enumerate(dataloader):\n i_accum = i // args.accum_freq\n step = num_batches_per_epoch * epoch + i_accum\n\n if not args.skip_scheduler:\n scheduler(step)\n\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n data_time_m.update(time.time() - end)\n optimizer.zero_grad()\n\n if args.accum_freq == 1:\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out[\"logit_scale\"]\n if args.distill:\n with torch.no_grad():\n dist_model_out = dist_model(images, texts)\n model_out.update({f'dist_{k}' : v for k, v in dist_model_out.items()})\n losses = loss(**model_out, output_dict=True)\n\n total_loss = 
sum(losses.values())\n losses[\"loss\"] = total_loss\n\n backward(total_loss, scaler)\n else:\n # First, cache the features without any gradient tracking.\n with torch.no_grad():\n with autocast():\n model_out = model(images, texts)\n model_out.pop(\"logit_scale\")\n for key, val in model_out.items():\n if key in accum_features:\n accum_features[key].append(val)\n else:\n accum_features[key] = [val]\n\n accum_images.append(images)\n accum_texts.append(texts)\n\n # If (i + 1) % accum_freq is not zero, move on to the next batch.\n if ((i + 1) % args.accum_freq) > 0:\n # FIXME this makes data time logging unreliable when accumulating\n continue\n\n # Now, ready to take gradients for the last accum_freq batches.\n # Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.\n # Call backwards each time, but only step optimizer at the end.\n optimizer.zero_grad()\n for j in range(args.accum_freq):\n images = accum_images[j]\n texts = accum_texts[j]\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out.pop(\"logit_scale\")\n inputs = {}\n for key, val in accum_features.items():\n accumulated = accum_features[key]\n inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])\n losses = loss(**inputs, logit_scale=logit_scale, output_dict=True)\n del inputs\n total_loss = sum(losses.values())\n losses[\"loss\"] = total_loss\n backward(total_loss, scaler)\n\n if scaler is not None:\n if args.horovod:\n optimizer.synchronize()\n scaler.unscale_(optimizer)\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n with optimizer.skip_synchronize():\n scaler.step(optimizer)\n else:\n if args.grad_clip_norm is not None:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n scaler.step(optimizer)\n scaler.update()\n else:\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n optimizer.step()\n\n # reset gradient accum, if enabled\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n # Note: we clamp to 4.6052 = ln(100), as in the original paper.\n with torch.no_grad():\n unwrap_model(model).logit_scale.clamp_(0, math.log(100))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i_accum + 1\n if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):\n batch_size = len(images)\n num_samples = batch_count * batch_size * args.accum_freq * args.world_size\n samples_per_epoch = dataloader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n for key, val in losses.items():\n if key not in losses_m:\n losses_m[key] = AverageMeter()\n losses_m[key].update(val.item(), batch_size)\n\n logit_scale_scalar = logit_scale.item()\n loss_log = \" \".join(\n [\n f\"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})\" \n for loss_name, loss_m in losses_m.items()\n ]\n )\n samples_per_second = args.accum_freq * args.batch_size * args.world_size / batch_time_m.val\n samples_per_second_per_gpu = args.accum_freq * args.batch_size / batch_time_m.val\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n 
f\"Batch (t): {batch_time_m.avg:.3f}, {samples_per_second:#g}/s, {samples_per_second_per_gpu:#g}/s/gpu \"\n f\"LR: {optimizer.param_groups[0]['lr']:5f} \"\n f\"Logit Scale: {logit_scale_scalar:.3f} \" + loss_log\n )\n\n # Save train loss / etc. Using non avg meter values as loggers have their own smoothing\n log_data = {\n \"data_time\": data_time_m.val,\n \"batch_time\": batch_time_m.val,\n \"samples_per_second\": samples_per_second,\n \"samples_per_second_per_gpu\": samples_per_second_per_gpu,\n \"scale\": logit_scale_scalar,\n \"lr\": optimizer.param_groups[0][\"lr\"]\n } \n log_data.update({name:val.val for name,val in losses_m.items()})\n\n for name, val in log_data.items():\n name = \"train/\" + name\n if tb_writer is not None:\n tb_writer.add_scalar(name, val, step)\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n wandb.log({name: val, 'step': step})\n\n # resetting batch / data time meters per log window\n batch_time_m.reset()\n data_time_m.reset()\n # end for" }, { "identifier": "evaluate", "path": "src/training/train.py", "snippet": "def evaluate(model, data, epoch, args, tb_writer=None):\n metrics = {}\n if not is_master(args):\n return metrics\n device = torch.device(args.device)\n model.eval()\n\n zero_shot_metrics = zero_shot_eval(model, data, epoch, args)\n metrics.update(zero_shot_metrics)\n\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):\n dataloader = data['val'].dataloader\n num_samples = 0\n samples_per_val = dataloader.num_samples\n\n # FIXME this does not scale past small eval datasets\n # all_image_features @ all_text_features will blow up memory and compute very quickly\n cumulative_loss = 0.0\n cumulative_gen_loss = 0.0\n all_image_features, all_text_features = [], []\n with torch.no_grad():\n for i, batch in enumerate(dataloader):\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n with autocast():\n model_out = model(images, texts)\n image_features = model_out[\"image_features\"]\n text_features = model_out[\"text_features\"]\n logit_scale = model_out[\"logit_scale\"]\n # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly\n # however, system RAM is easily exceeded and compute time becomes problematic\n all_image_features.append(image_features.cpu())\n all_text_features.append(text_features.cpu())\n logit_scale = logit_scale.mean()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logits_per_image.t()\n\n batch_size = images.shape[0]\n labels = torch.arange(batch_size, device=device).long()\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n gen_loss = maybe_compute_generative_loss(model_out)\n\n cumulative_loss += total_loss * batch_size\n num_samples += batch_size\n if is_master(args) and (i % 100) == 0:\n logging.info(\n f\"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\\t\"\n f\"Clip Loss: {cumulative_loss / num_samples:.6f}\\t\")\n\n if gen_loss is not None:\n cumulative_gen_loss += gen_loss * batch_size\n logging.info(\n f\"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\\t\")\n\n val_metrics = get_clip_metrics(\n image_features=torch.cat(all_image_features),\n text_features=torch.cat(all_text_features),\n 
logit_scale=logit_scale.cpu(),\n )\n loss = cumulative_loss / num_samples\n metrics.update(\n {**val_metrics, \"clip_val_loss\": loss.item(), \"epoch\": epoch, \"num_samples\": num_samples}\n )\n if gen_loss is not None:\n gen_loss = cumulative_gen_loss / num_samples\n metrics.update({\"val_generative_loss\": gen_loss.item()})\n\n if not metrics:\n return metrics\n\n logging.info(\n f\"Eval Epoch: {epoch} \"\n + \"\\t\".join([f\"{k}: {round(v, 4):.4f}\" for k, v in metrics.items()])\n )\n\n if args.save_logs:\n for name, val in metrics.items():\n if tb_writer is not None:\n tb_writer.add_scalar(f\"val/{name}\", val, epoch)\n\n with open(os.path.join(args.checkpoint_path, \"results.jsonl\"), \"a+\") as f:\n f.write(json.dumps(metrics))\n f.write(\"\\n\")\n\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n for name, val in metrics.items():\n wandb.log({f\"val/{name}\": val, 'epoch': epoch})\n\n return metrics" }, { "identifier": "pt_load", "path": "src/training/file_utils.py", "snippet": "def pt_load(file_path, map_location=None):\n if file_path.startswith('s3'):\n logging.info('Loading remote checkpoint, which may take a bit.')\n of = fsspec.open(file_path, \"rb\")\n with of as f:\n out = torch.load(f, map_location=map_location)\n return out" }, { "identifier": "check_exists", "path": "src/training/file_utils.py", "snippet": "def check_exists(file_path):\n try:\n with fsspec.open(file_path):\n pass\n except FileNotFoundError:\n return False\n return True" }, { "identifier": "start_sync_process", "path": "src/training/file_utils.py", "snippet": "def start_sync_process(sync_every, local_dir, remote_dir, protocol):\n p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))\n return p" }, { "identifier": "remote_sync", "path": "src/training/file_utils.py", "snippet": "def remote_sync(local_dir, remote_dir, protocol):\n logging.info('Starting remote sync.')\n if protocol == 's3':\n return remote_sync_s3(local_dir, remote_dir)\n elif protocol == 'fsspec':\n return remote_sync_fsspec(local_dir, remote_dir)\n else:\n logging.error('Remote protocol not known')\n return False" }, { "identifier": "natural_key", "path": "src/training/main.py", "snippet": "def natural_key(string_):\n \"\"\"See http://www.codinghorror.com/blog/archives/001018.html\"\"\"\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_.lower())]" }, { "identifier": "get_latest_checkpoint", "path": "src/training/main.py", "snippet": "def get_latest_checkpoint(path: str, remote : bool):\n # as writen, this glob recurses, so can pick up checkpoints across multiple sub-folders\n if remote:\n result = subprocess.run([\"aws\", \"s3\", \"ls\", path + \"/\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(result)\n if result.returncode == 1:\n return None\n checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\\n')[:-1]]\n else:\n checkpoints = glob.glob(path + '**/*.pt', recursive=True)\n if checkpoints:\n checkpoints = sorted(checkpoints, key=natural_key)\n return checkpoints[-1]\n return None" }, { "identifier": "copy_codebase", "path": "src/training/main.py", "snippet": "def copy_codebase(args):\n from shutil import copytree, ignore_patterns\n new_code_path = os.path.join(args.logs, args.name, \"code\")\n if os.path.exists(new_code_path):\n print(\n f\"Error. Experiment already exists at {new_code_path}. 
Use --name to specify a new experiment.\"\n )\n return -1\n print(f\"Copying codebase to {new_code_path}\")\n current_code_path = os.path.realpath(__file__)\n for _ in range(3):\n current_code_path = os.path.dirname(current_code_path)\n copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))\n print(\"Done copying code.\")\n return 1" }, { "identifier": "parse_args", "path": "params.py", "snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--root-data-dir\",\n type=str,\n default=None,\n help=\"Root directory to datasets\",\n )\n parser.add_argument(\n \"--train-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.\",\n )\n parser.add_argument(\n \"--train-data-upsampling-factors\",\n type=str,\n default=None,\n help=(\n \"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. \"\n \"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) \"\n \"By default, datapoints are sampled uniformly regardless of the dataset sizes.\"\n )\n )\n parser.add_argument(\n \"--val-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with validation data\",\n )\n parser.add_argument(\n \"--train-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Required for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--val-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Useful for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--dataset-type\",\n choices=[\"webdataset\", \"csv\", \"synthetic\", \"auto\"],\n default=\"auto\",\n help=\"Which type of dataset to process.\"\n )\n parser.add_argument(\n \"--dataset-resampled\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use sampling with replacement for webdataset shard selection.\"\n )\n parser.add_argument(\n \"--csv-separator\",\n type=str,\n default=\"\\t\",\n help=\"For csv-like datasets, which separator to use.\"\n )\n parser.add_argument(\n \"--csv-img-key\",\n type=str,\n default=\"filepath\",\n help=\"For csv-like datasets, the name of the key for the image paths.\"\n )\n parser.add_argument(\n \"--csv-caption-key\",\n type=str,\n default=\"title\",\n help=\"For csv-like datasets, the name of the key for the captions.\"\n )\n parser.add_argument(\n \"--imagenet-val\",\n type=str,\n default=None,\n help=\"Path to imagenet val set for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--imagenet-v2\",\n type=str,\n default=None,\n help=\"Path to imagenet v2 for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--logs\",\n type=str,\n default=\"./logs/\",\n help=\"Where to store tensorboard logs. Use None to avoid storing logs.\",\n )\n parser.add_argument(\n \"--log-local\",\n action=\"store_true\",\n default=False,\n help=\"log files on local master, otherwise global master only.\",\n )\n parser.add_argument(\n \"--name\",\n type=str,\n default=None,\n help=\"Optional identifier for the experiment when storing logs. 
Otherwise use current time.\",\n )\n parser.add_argument(\n \"--workers\", type=int, default=1, help=\"Number of dataloader workers per GPU.\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size per GPU.\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=32, help=\"Number of epochs to train for.\"\n )\n parser.add_argument(\n \"--epochs-cooldown\", type=int, default=None,\n help=\"When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards.\"\n )\n parser.add_argument(\"--lr\", type=float, default=None, help=\"Learning rate.\")\n parser.add_argument(\"--beta1\", type=float, default=None, help=\"Adam beta 1.\")\n parser.add_argument(\"--beta2\", type=float, default=None, help=\"Adam beta 2.\")\n parser.add_argument(\"--eps\", type=float, default=None, help=\"Adam epsilon.\")\n parser.add_argument(\"--wd\", type=float, default=0.2, help=\"Weight decay.\")\n parser.add_argument(\n \"--warmup\", type=int, default=10000, help=\"Number of steps to warmup for.\"\n )\n parser.add_argument(\n \"--use-bn-sync\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use batch norm sync.\")\n parser.add_argument(\n \"--skip-scheduler\",\n action=\"store_true\",\n default=False,\n help=\"Use this flag to skip the learning rate decay.\",\n )\n parser.add_argument(\n \"--lr-scheduler\",\n type=str,\n default='cosine',\n help=\"LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine\",\n )\n parser.add_argument(\n \"--lr-cooldown-end\", type=float, default=0.0,\n help=\"End learning rate for cooldown schedule. Default: 0\"\n )\n parser.add_argument(\n \"--lr-cooldown-power\", type=float, default=1.0,\n help=\"Power for polynomial cooldown schedule. 
Default: 1.0 (linear decay)\"\n )\n parser.add_argument(\n \"--save-frequency\", type=int, default=1, help=\"How often to save checkpoints.\"\n )\n parser.add_argument(\n \"--save-most-recent\",\n action=\"store_true\",\n default=False,\n help=\"Always save the most recent model trained to epoch_latest.pt.\",\n )\n parser.add_argument(\n \"--zeroshot-frequency\", type=int, default=2, help=\"How often to run zero shot.\"\n )\n parser.add_argument(\n \"--val-frequency\", type=int, default=1, help=\"How often to run evaluation with val data.\"\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n help=\"path to latest checkpoint (default: none)\",\n )\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n )\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"RN50\",\n help=\"Name of the vision backbone to use.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default='',\n type=str,\n help=\"Use a pretrained CLIP model weights with the specified tag or file path.\",\n )\n parser.add_argument(\n \"--pretrained-image\",\n default=False,\n action='store_true',\n help=\"Load imagenet pretrained weights for image tower backbone if available.\",\n )\n parser.add_argument(\n \"--lock-image\",\n default=False,\n action='store_true',\n help=\"Lock full image tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-image-unlocked-groups\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-image-freeze-bn-stats\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override default image mean value of dataset')\n parser.add_argument(\n '--image-std', type=float, nargs='+', default=None, metavar='STD',\n help='Override default image std deviation of of dataset')\n parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)\n parser.add_argument(\n \"--grad-checkpointing\",\n default=False,\n action='store_true',\n help=\"Enable gradient checkpointing.\",\n )\n parser.add_argument(\n \"--local-loss\",\n default=False,\n action=\"store_true\",\n help=\"calculate loss w/ local features @ global (instead of realizing full global @ global matrix)\"\n )\n parser.add_argument(\n \"--gather-with-grad\",\n default=False,\n action=\"store_true\",\n help=\"enable full distributed gradient for feature gather\"\n )\n parser.add_argument(\n '--force-image-size', type=int, nargs='+', default=None,\n help='Override default image size'\n )\n parser.add_argument(\n \"--force-quick-gelu\",\n default=False,\n action='store_true',\n help=\"Force use of QuickGELU activation for non-OpenAI transformer models.\",\n )\n parser.add_argument(\n \"--force-patch-dropout\",\n default=None,\n type=float,\n help=\"Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper\",\n )\n parser.add_argument(\n \"--force-custom-text\",\n default=False,\n action='store_true',\n help=\"Force use of CustomTextCLIP model (separate text-tower).\",\n )\n parser.add_argument(\n \"--torchscript\",\n default=False,\n action='store_true',\n help=\"torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'\",\n )\n parser.add_argument(\n 
\"--trace\",\n default=False,\n action='store_true',\n help=\"torch.jit.trace the model for inference / eval only\",\n )\n parser.add_argument(\n \"--accum-freq\", type=int, default=1, help=\"Update the model every --acum-freq steps.\"\n )\n # arguments for distributed training\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--report-to\",\n default='',\n type=str,\n help=\"Options are ['wandb', 'tensorboard', 'wandb,tensorboard']\"\n )\n parser.add_argument(\n \"--wandb-notes\",\n default='',\n type=str,\n help=\"Notes if logging with wandb\"\n )\n parser.add_argument(\n \"--wandb-project-name\",\n type=str,\n default='open-clip',\n help=\"Name of the project if logging with wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"If true, more information is logged.\"\n )\n parser.add_argument(\n \"--copy-codebase\",\n default=False,\n action=\"store_true\",\n help=\"If true, we copy the entire base on the log directory, and execute from there.\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\"\n )\n parser.add_argument(\n \"--ddp-static-graph\",\n default=False,\n action='store_true',\n help=\"Enable static graph optimization for DDP in PyTorch >= 1.11.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, help=\"Default random seed.\"\n )\n parser.add_argument(\n \"--grad-clip-norm\", type=float, default=None, help=\"Gradient clip.\"\n )\n parser.add_argument(\n \"--lock-text\",\n default=False,\n action='store_true',\n help=\"Lock full text tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-text-unlocked-layers\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-text-freeze-layer-norm\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n \"--log-every-n-steps\",\n type=int,\n default=100,\n help=\"Log every n steps to tensorboard/console/wandb.\",\n )\n parser.add_argument(\n \"--coca-caption-loss-weight\",\n type=float,\n default=2.0,\n help=\"Weight assigned to caption loss in CoCa.\"\n )\n parser.add_argument(\n \"--coca-contrastive-loss-weight\",\n type=float,\n default=1.0,\n help=\"Weight assigned to contrastive loss when training CoCa.\"\n )\n parser.add_argument(\n \"--remote-sync\",\n type=str,\n default=None,\n help=\"Optinoally sync with a remote path specified by this arg\",\n )\n parser.add_argument(\n \"--remote-sync-frequency\",\n type=int,\n default=300,\n help=\"How frequently to sync to a remote directly if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--remote-sync-protocol\",\n choices=[\"s3\", \"fsspec\"],\n default=\"s3\",\n help=\"How to do the remote sync backup if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--delete-previous-checkpoint\",\n default=False,\n action=\"store_true\",\n help=\"If true, delete previous checkpoint after storing a new one.\"\n )\n parser.add_argument(\n 
\"--distill-model\",\n default=None,\n help='Which model arch to distill from, if any.'\n )\n parser.add_argument(\n \"--distill-pretrained\",\n default=None,\n help='Which pre-trained weights to distill from, if any.'\n )\n # newly added flag for adding random rotation into data augmentation\n parser.add_argument(\n \"--random-rotation\",\n action=\"store_true\",\n default=False,\n help=\"If True, add random rotation into image transform for data augmentation (only for training).\"\n )\n # newly added for testing zero-shot and linear probe classification (custom dataset)\n parser.add_argument(\n \"--datasets-for-testing\",\n nargs='*',\n type=str,\n default=None,\n help=\"A list of names of datasets for testing zero-shot classification testing\",\n )\n parser.add_argument(\n \"--classification-mode\",\n type=str,\n default=\"multiclass\",\n help=\"Choose either binary or multiclass\",\n )\n parser.add_argument(\n \"--test-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with test data (e.g., for testing zero-shot classification)\",\n )\n parser.add_argument(\n \"--classnames\",\n type=str,\n default=None,\n help=\"Path to txt file containing class names\",\n )\n parser.add_argument(\n \"--test-data-name\",\n type=str,\n default=None,\n help=\"The name of the test data (e.g., RSICD, EuroSat)\",\n )\n parser.add_argument(\n \"--csv-class-key\",\n type=str,\n default=\"label\",\n help=\"For csv-like datasets, the name of the key for image labels (for classification).\"\n )\n parser.add_argument(\n \"--csv-actual-label-key\",\n type=str,\n default=\"binary\",\n help=\"If classification_model=binary, then specify the name of the key for actual binary labels (i.e., 0/1).\"\n )\n parser.add_argument(\n \"--alpha\",\n type=float,\n default=None,\n help=\"The regularization multiplier of logistic regression to try for linear probing. If None, do a search.\"\n )\n parser.add_argument(\n \"--samples-per-class\",\n type=str,\n default=None,\n help=\"Numbers of samples per class to train logistic regression for linear probing. If None, use full dataset.\"\n )\n parser.add_argument(\n \"--test-result-save-path\",\n type=str,\n default=None,\n help=\"The path to save test results as a pickle file.\"\n )\n parser.add_argument(\n \"--debugging\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use debugging mode, which will return more information.\"\n )\n \n args = parser.parse_args(args)\n\n # If some params are not passed, we use the default values based on model name.\n default_params = get_default_params(args.model)\n for name, val in default_params.items():\n if getattr(args, name) is None:\n setattr(args, name, val)\n\n return args" } ]
import glob
import json
import logging
import os
import re
import subprocess
import sys
import random
import numpy as np
import torch
import wandb
import torch.utils.tensorboard as tensorboard
import horovod.torch as hvd
from datetime import datetime
from torch import optim
from torch.cuda.amp import GradScaler
from torchvision import transforms
from src.open_clip.factory import create_model_and_transforms, get_tokenizer, create_loss
from src.open_clip.model import trace_model
from src.training.data import get_data
from src.training.distributed import is_master, init_distributed_device, broadcast_object
from src.training.logger import setup_logging
from src.training.scheduler import cosine_lr, const_lr, const_lr_cooldown
from src.training.train import train_one_epoch, evaluate
from src.training.file_utils import pt_load, check_exists, start_sync_process, remote_sync
from src.training.main import natural_key, get_latest_checkpoint, copy_codebase
from test_zero_shot_classification import *
from params import parse_args
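Several of these imports (wandb, tensorboard, horovod) are treated as optional at runtime: the script wraps them in try/except blocks, falls back to None, and asserts before use. The following is a reconstruction of that pattern, not a verbatim copy of the file.

```python
# Optional-dependency pattern: import if available, otherwise bind to None and
# guard every use with an assert that names the missing package.
try:
    import wandb
except ImportError:
    wandb = None

try:
    import torch.utils.tensorboard as tensorboard
except ImportError:
    tensorboard = None

try:
    import horovod.torch as hvd
except ImportError:
    hvd = None

def log_metric(name: str, value: float, step: int, use_wandb: bool) -> None:
    if use_wandb:
        assert wandb is not None, "Please install wandb."
        wandb.log({name: value, "step": step})
```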
13187
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) ddp_args = {} if args.ddp_static_graph: # this doesn't exist in older PyTorch, arg only added if enabled ddp_args['static_graph'] = True model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args) if args.distill: dist_model = torch.nn.parallel.DistributedDataParallel(dist_model, device_ids=[device], **ddp_args) # create optimizer and scaler optimizer = None scaler = None if args.train_data or args.dataset_type == "synthetic": assert not args.trace, 'Cannot train with traced model' exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n include = lambda n, p: not exclude(n, p) named_parameters = list(model.named_parameters()) gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad] rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] optimizer = optim.AdamW( [ {"params": gain_or_bias_params, "weight_decay": 0.}, {"params": rest_params, "weight_decay": args.wd}, ], lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, ) if args.horovod: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) scaler = GradScaler() if args.precision == "amp" else None # optionally resume from a checkpoint start_epoch = 0 if args.resume is not None: checkpoint = pt_load(args.resume, map_location='cpu') if 'epoch' in checkpoint: # resuming a train checkpoint w/ epoch and optimizer state start_epoch = checkpoint["epoch"] sd = checkpoint["state_dict"] if not args.distributed and next(iter(sd.items()))[0].startswith('module'): sd = {k[len('module.'):]: v for k, v in sd.items()} model.load_state_dict(sd) if optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer"]) if scaler is not None and 'scaler' in checkpoint: scaler.load_state_dict(checkpoint['scaler']) logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})") else: # loading a bare (model only) checkpoint for fine-tune or evaluation model.load_state_dict(checkpoint) logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})") # initialize datasets data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch, tokenizer=get_tokenizer(args.model)) assert len(data), 'At least one train or eval dataset must be specified.' # initialize benchmark dataloaders for testing zero-shot classification if args.datasets_for_testing is not None or args.test_data_name is not None: test_dataloaders = get_test_dataloaders(args, preprocess_val) else: test_dataloaders = None # create scheduler if train scheduler = None if 'train' in data and optimizer is not None: total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs if args.lr_scheduler == "cosine": scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const": scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const-cooldown": assert args.epochs_cooldown is not None,\ "Please specify the number of cooldown epochs for this lr schedule." 
cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown scheduler = const_lr_cooldown( optimizer, args.lr, args.warmup, total_steps, cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end) else: logging.error( f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.') exit(1) # determine if this worker should save logs and checkpoints. only do so if it is rank == 0 args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args) writer = None if args.save_logs and args.tensorboard: assert tensorboard is not None, "Please install tensorboard." writer = tensorboard.SummaryWriter(args.tensorboard_path) if args.wandb and is_master(args): assert wandb is not None, 'Please install wandb.' logging.debug('Starting wandb.') args.train_sz = data["train"].dataloader.num_samples if args.val_data is not None: args.val_sz = data["val"].dataloader.num_samples # you will have to configure this for your project! wandb.init( project=args.wandb_project_name, name=args.name, id=args.name, notes=args.wandb_notes, tags=[], resume='auto' if args.resume == "latest" else None, config=vars(args), ) if args.debug: wandb.watch(model, log='all') wandb.save(params_file) logging.debug('Finished loading wandb.') if 'train' not in data:
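The optimizer setup in the cropped code splits parameters into two AdamW groups so that biases, norm parameters, and the logit scale receive no weight decay. Below is a minimal runnable sketch of that split; the tiny model and hyperparameter values are illustrative examples rather than the script's args.

```python
# Sketch of the weight-decay split used above: parameters that are biases,
# norm gains/biases, or the logit scale get weight_decay=0; everything else
# gets the configured decay.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))

def exclude(name: str, p: torch.Tensor) -> bool:
    return p.ndim < 2 or "bn" in name or "ln" in name or "bias" in name or "logit_scale" in name

named = list(model.named_parameters())
no_decay = [p for n, p in named if exclude(n, p) and p.requires_grad]
decay = [p for n, p in named if not exclude(n, p) and p.requires_grad]

optimizer = torch.optim.AdamW(
    [{"params": no_decay, "weight_decay": 0.0},
     {"params": decay, "weight_decay": 0.2}],
    lr=5e-4, betas=(0.9, 0.98), eps=1e-6,  # example values only
)
print(len(no_decay), "params without decay,", len(decay), "with decay")
```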
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." 
) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO setup_logging(args.log_path, args.log_level) # Setup wandb, tensorboard, checkpoint logging args.wandb = 'wandb' in args.report_to or 'all' in args.report_to args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to args.checkpoint_path = os.path.join(log_base_path, "checkpoints") if is_master(args): args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else '' for dirname in [args.tensorboard_path, args.checkpoint_path]: if dirname: os.makedirs(dirname, exist_ok=True) else: args.tensorboard_path = '' if resume_latest: resume_from = None checkpoint_path = args.checkpoint_path # If using remote_sync, need to check the remote instead of the local checkpoints folder. if args.remote_sync is not None: checkpoint_path = os.path.join(args.remote_sync, args.name, "checkpoints") if args.save_most_recent: print('Error. Cannot use save-most-recent with remote_sync and resume latest.') return -1 if args.remote_sync_protocol != 's3': print('Error. Sync protocol not supported when using resume latest.') return -1 if is_master(args): # Checking for existing checkpoint via master rank only. It is possible for # different rank processes to see different files if a shared file-system is under # stress, however it's very difficult to fully work around such situations. if args.save_most_recent: # if --save-most-recent flag is set, look for latest at a fixed filename resume_from = os.path.join(checkpoint_path, LATEST_CHECKPOINT_NAME) if not os.path.exists(resume_from): # If no latest checkpoint has been saved yet, don't try to resume resume_from = None else: # otherwise, list checkpoint dir contents and pick the newest checkpoint resume_from = get_latest_checkpoint(checkpoint_path, remote=args.remote_sync is not None) if resume_from: logging.info(f'Found latest resume checkpoint at {resume_from}.') else: logging.info(f'No latest resume checkpoint found in {checkpoint_path}.') if args.distributed: # sync found checkpoint path to all ranks resume_from = broadcast_object(args, resume_from) args.resume = resume_from if args.copy_codebase: copy_codebase(args) # start the sync proces if remote-sync is not None remote_sync_process = None if is_master(args) and args.remote_sync is not None: # first make sure it works result = remote_sync( os.path.join(args.logs, args.name), os.path.join(args.remote_sync, args.name), args.remote_sync_protocol ) if result: logging.info('remote sync successful.') else: logging.info('Error: remote sync failed. Exiting.') return -1 # if all looks good, start a process to do this every args.remote_sync_frequency seconds remote_sync_process = start_sync_process( args.remote_sync_frequency, os.path.join(args.logs, args.name), os.path.join(args.remote_sync, args.name), args.remote_sync_protocol ) remote_sync_process.start() if args.precision == 'fp16': logging.warning( 'It is recommended to use AMP mixed-precision instead of FP16. ' 'FP16 support needs further verification and tuning, especially for train.') if args.horovod: logging.info( f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.' f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') elif args.distributed: logging.info( f'Running in distributed mode with multiple processes. Device: {args.device}.' 
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') else: logging.info(f'Running with a single process. Device {args.device}.') dist_model = None args.distill = args.distill_model is not None and args.distill_pretrained is not None if args.distill: #FIXME: support distillation with grad accum. assert args.accum_freq == 1 #FIXME: support distillation with coca. assert 'coca' not in args.model.lower() if isinstance(args.force_image_size, (tuple, list)) and len(args.force_image_size) == 1: # arg is nargs, single (square) image size list -> int args.force_image_size = args.force_image_size[0] random_seed(args.seed, 0) model, preprocess_train, preprocess_val = create_model_and_transforms( args.model, args.pretrained, precision=args.precision, device=device, jit=args.torchscript, force_quick_gelu=args.force_quick_gelu, force_custom_text=args.force_custom_text, force_patch_dropout=args.force_patch_dropout, force_image_size=args.force_image_size, pretrained_image=args.pretrained_image, image_mean=args.image_mean, image_std=args.image_std, aug_cfg=args.aug_cfg, output_dict=True, ) if args.random_rotation: # add random rotation step into preprocess_train for i, trans in enumerate(preprocess_train.transforms): if type(trans) == transforms.transforms.ToTensor: # insert random rotation right before ToTensor preprocess_train.transforms.insert(i, transforms.Lambda(RandomRotationNew)) break if args.distill: # FIXME: currenlty assumes the model your distilling from has the same tokenizer & transforms. dist_model, _, _ = create_model_and_transforms( args.distill_model, args.distill_pretrained, device=device, precision=args.precision, output_dict=True, ) random_seed(args.seed, args.rank) if args.trace: model = trace_model(model, batch_size=args.batch_size, device=device) if args.lock_image: # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 model.lock_image_tower( unlocked_groups=args.lock_image_unlocked_groups, freeze_bn_stats=args.lock_image_freeze_bn_stats) if args.lock_text: model.lock_text_tower( unlocked_layers=args.lock_text_unlocked_layers, freeze_layer_norm=args.lock_text_freeze_layer_norm) if args.grad_checkpointing: model.set_grad_checkpointing() if is_master(args): logging.info("Model:") logging.info(f"{str(model)}") logging.info("Params:") params_file = os.path.join(args.logs, args.name, "params.txt") with open(params_file, "w") as f: for name in sorted(vars(args)): val = getattr(args, name) logging.info(f" {name}: {val}") f.write(f"{name}: {val}\n") if args.distributed and not args.horovod: if args.use_bn_sync: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) ddp_args = {} if args.ddp_static_graph: # this doesn't exist in older PyTorch, arg only added if enabled ddp_args['static_graph'] = True model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args) if args.distill: dist_model = torch.nn.parallel.DistributedDataParallel(dist_model, device_ids=[device], **ddp_args) # create optimizer and scaler optimizer = None scaler = None if args.train_data or args.dataset_type == "synthetic": assert not args.trace, 'Cannot train with traced model' exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n include = lambda n, p: not exclude(n, p) named_parameters = list(model.named_parameters()) gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad] rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] optimizer = 
optim.AdamW( [ {"params": gain_or_bias_params, "weight_decay": 0.}, {"params": rest_params, "weight_decay": args.wd}, ], lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, ) if args.horovod: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) scaler = GradScaler() if args.precision == "amp" else None # optionally resume from a checkpoint start_epoch = 0 if args.resume is not None: checkpoint = pt_load(args.resume, map_location='cpu') if 'epoch' in checkpoint: # resuming a train checkpoint w/ epoch and optimizer state start_epoch = checkpoint["epoch"] sd = checkpoint["state_dict"] if not args.distributed and next(iter(sd.items()))[0].startswith('module'): sd = {k[len('module.'):]: v for k, v in sd.items()} model.load_state_dict(sd) if optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer"]) if scaler is not None and 'scaler' in checkpoint: scaler.load_state_dict(checkpoint['scaler']) logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})") else: # loading a bare (model only) checkpoint for fine-tune or evaluation model.load_state_dict(checkpoint) logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})") # initialize datasets data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch, tokenizer=get_tokenizer(args.model)) assert len(data), 'At least one train or eval dataset must be specified.' # initialize benchmark dataloaders for testing zero-shot classification if args.datasets_for_testing is not None or args.test_data_name is not None: test_dataloaders = get_test_dataloaders(args, preprocess_val) else: test_dataloaders = None # create scheduler if train scheduler = None if 'train' in data and optimizer is not None: total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs if args.lr_scheduler == "cosine": scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const": scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const-cooldown": assert args.epochs_cooldown is not None,\ "Please specify the number of cooldown epochs for this lr schedule." cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown scheduler = const_lr_cooldown( optimizer, args.lr, args.warmup, total_steps, cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end) else: logging.error( f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.') exit(1) # determine if this worker should save logs and checkpoints. only do so if it is rank == 0 args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args) writer = None if args.save_logs and args.tensorboard: assert tensorboard is not None, "Please install tensorboard." writer = tensorboard.SummaryWriter(args.tensorboard_path) if args.wandb and is_master(args): assert wandb is not None, 'Please install wandb.' logging.debug('Starting wandb.') args.train_sz = data["train"].dataloader.num_samples if args.val_data is not None: args.val_sz = data["val"].dataloader.num_samples # you will have to configure this for your project! 
wandb.init( project=args.wandb_project_name, name=args.name, id=args.name, notes=args.wandb_notes, tags=[], resume='auto' if args.resume == "latest" else None, config=vars(args), ) if args.debug: wandb.watch(model, log='all') wandb.save(params_file) logging.debug('Finished loading wandb.') if 'train' not in data:
evaluate(model, data, start_epoch, args, writer)
13
2023-12-19 11:50:56+00:00
16k
penghao-wu/vstar
VisualSearch/train.py
[ { "identifier": "VSMForCausalLM", "path": "VisualSearch/model/VSM.py", "snippet": "class VSMForCausalLM(LlavaLlamaForCausalLM):\n\tdef __init__(\n\t\tself,\n\t\tconfig,\n\t\t**kwargs,\n\t):\n\t\tif not hasattr(config, \"train_mask_decoder\"):\n\t\t\tconfig.mm_use_im_start_end = kwargs.pop(\"use_mm_start_end\", True)\n\t\t\tconfig.mm_vision_tower = kwargs.get(\n\t\t\t\t\"vision_tower\", \"openai/clip-vit-large-patch14\"\n\t\t\t)\n\t\t\tself.ce_loss_weight = kwargs.pop(\"ce_loss_weight\", None)\n\t\t\tself.dice_loss_weight = kwargs.pop(\"dice_loss_weight\", None)\n\t\t\tself.bce_loss_weight = kwargs.pop(\"bce_loss_weight\", None)\n\t\t\tself.det_loss_weight = kwargs.pop(\"det_loss_weight\", None)\n\t\telse:\n\t\t\tconfig.mm_vision_tower = config.vision_tower\n\n\t\tself.loc_token_idx = kwargs.pop(\"loc_token_idx\")\n\n\t\tsuper().__init__(config)\n\n\t\tself.model = VSMModel(config, **kwargs)\n\n\t\tself.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n\t\t# Initialize weights and apply final processing\n\t\tself.post_init()\n\n\tdef get_visual_embs(self, pixel_values: torch.FloatTensor):\n\t\twith torch.no_grad():\n\t\t\timage_embeddings = self.model.owlvit.get_visual_embs(pixel_values)\n\t\treturn image_embeddings\n\n\tdef forward(self, **kwargs):\n\t\tif \"past_key_values\" in kwargs:\n\t\t\treturn super().forward(**kwargs)\n\t\treturn self.model_forward(**kwargs)\n\n\tdef model_forward(\n\t\tself,\n\t\timages: torch.FloatTensor,\n\t\timages_clip: torch.FloatTensor,\n\t\tinput_ids: torch.LongTensor,\n\t\tlabels: torch.LongTensor,\n\t\tattention_masks: torch.LongTensor,\n\t\toffset: torch.LongTensor,\n\t\tmasks_list: List[torch.FloatTensor],\n\t\tlabel_list: List[torch.Tensor],\n\t\tbboxes_labels_list: List[torch.FloatTensor],\n\t\tbboxes_valid_list: torch.Tensor,\n\t\tmasks_valid_list: List[torch.Tensor],\n\t\tresize_list: List[tuple],\n\t\tinference: bool = False,\n\t\t**kwargs,\n\t):\n\t\timage_embeddings = self.get_visual_embs(images)\n\t\tbatch_size = image_embeddings.shape[0]\n\t\tassert batch_size == len(offset) - 1\n\n\t\tloc_token_mask = input_ids[:, 1:] == self.loc_token_idx\n\t\tloc_token_mask = torch.cat(\n\t\t\t[\n\t\t\t\tloc_token_mask,\n\t\t\t\ttorch.zeros((loc_token_mask.shape[0], 1)).bool().cuda(),\n\t\t\t],\n\t\t\tdim=1,\n\t\t)\n\t\t# hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)\n\t\tloc_token_mask = torch.cat(\n\t\t\t[torch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(), loc_token_mask],\n\t\t\tdim=1,\n\t\t)\n\n\t\tif inference:\n\t\t\tn_batch = 1\n\t\t\tlength = input_ids.shape[0]\n\t\t\tassert images_clip.shape[0] == 1\n\t\t\timages_clip_extend = images_clip.expand(length, -1, -1, -1).contiguous()\n\n\t\t\toutput_hidden_states = []\n\t\t\tfor i in range(n_batch):\n\t\t\t\tstart_i, end_i = i * length, min((i + 1) * length, input_ids.shape[0])\n\t\t\t\toutput_i = super().forward(\n\t\t\t\t\timages=images_clip_extend[: end_i - start_i],\n\t\t\t\t\tattention_mask=attention_masks[start_i:end_i],\n\t\t\t\t\tinput_ids=input_ids[start_i:end_i],\n\t\t\t\t\toutput_hidden_states=True,\n\t\t\t\t)\n\t\t\t\toutput_hidden_states.append(output_i.hidden_states)\n\t\t\t\ttorch.cuda.empty_cache()\n\n\t\t\toutput_hidden_states_list = []\n\t\t\toutput_hidden_states_level = torch.cat(output_hidden_states, dim=0)\n\t\t\toutput_hidden_states_list.append(output_hidden_states_level)\n\t\t\toutput_hidden_states = output_hidden_states_list\n\t\t\toutput = None\n\n\t\telse:\n\t\t\timages_clip_list = []\n\t\t\tfor i in 
range(len(offset) - 1):\n\t\t\t\tstart_i, end_i = offset[i], offset[i + 1]\n\t\t\t\timages_clip_i = (\n\t\t\t\t\timages_clip[i]\n\t\t\t\t\t.unsqueeze(0)\n\t\t\t\t\t.expand(end_i - start_i, -1, -1, -1)\n\t\t\t\t\t.contiguous()\n\t\t\t\t)\n\t\t\t\timages_clip_list.append(images_clip_i)\n\t\t\timages_clip = torch.cat(images_clip_list, dim=0)\n\n\t\t\toutput = super().forward(\n\t\t\t\timages=images_clip,\n\t\t\t\tattention_mask=attention_masks,\n\t\t\t\tinput_ids=input_ids,\n\t\t\t\tlabels=labels,\n\t\t\t\toutput_hidden_states=True,\n\t\t\t)\n\t\t\toutput_hidden_states = output.hidden_states\n\n\t\t# seg\n\t\thidden_states_seg = []\n\t\tassert len(self.model.text_hidden_fcs_seg) == 1\n\t\thidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states[-1]))\n\n\t\tlast_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)\n\n\t\t# det\n\t\thidden_states_det = []\n\n\t\tassert len(self.model.text_hidden_fcs_det) == 1\n\t\thidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states[-1]))\n\t\tlast_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)\n\n\t\tpred_embeddings_seg = last_hidden_state_seg[loc_token_mask]\n\t\tpred_embeddings_det = last_hidden_state_det[loc_token_mask]\n\t\tloc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]\n\n\t\tloc_token_offset = loc_token_counts.cumsum(-1)\n\t\tloc_token_offset = torch.cat(\n\t\t\t[torch.zeros(1).long().cuda(), loc_token_offset], dim=0\n\t\t)\n\n\t\tloc_token_offset = loc_token_offset[offset]\n\n\t\tpred_embeddings_seg_ = []\n\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\tpred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])\n\t\tpred_embeddings_seg = pred_embeddings_seg_\n\n\t\tpred_embeddings_det_ = []\n\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\tpred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])\n\t\tpred_embeddings_det = pred_embeddings_det_\n\n\t\t# seg branch \n\t\tmultimask_output = False\n\t\tpred_masks = []\n\t\tfor i in range(len(pred_embeddings_seg)):\n\t\t\t(\n\t\t\t\tsparse_embeddings,\n\t\t\t\tdense_embeddings,\n\t\t\t) = self.model.prompt_encoder(\n\t\t\t\tpoints=None,\n\t\t\t\tboxes=None,\n\t\t\t\tmasks=None,\n\t\t\t\ttext_embeds=pred_embeddings_seg[i].unsqueeze(1),\n\t\t\t)\n\t\t\tsparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)\n\t\t\tlow_res_masks, iou_predictions = self.model.mask_decoder(\n\t\t\t\timage_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),\n\t\t\t\timage_pe=self.model.prompt_encoder.get_dense_pe(),\n\t\t\t\tsparse_prompt_embeddings=sparse_embeddings,\n\t\t\t\tdense_prompt_embeddings=dense_embeddings,\n\t\t\t\tmultimask_output=multimask_output,\n\t\t\t)\n\t\t\tpred_mask = F.interpolate(\n\t\t\tlow_res_masks, label_list[i].shape, mode=\"bilinear\", align_corners=False\n\t\t)\n\t\t\tpred_masks.append(pred_mask[:, 0])\n\n\t\tgt_masks = masks_list\n\n\t\t# det branch\n\t\tdetection_result_batch = []\n\t\tfor i in range(len(pred_embeddings_det)):\n\t\t\tbs = pred_embeddings_det[i].shape[0]\n\t\t\tdetection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))\n\t\t\tdetection_result_batch.append(detection_result)\n\n\n\t\tpred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 
0)\n\t\tpred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)\n\t\tif inference:\n\t\t\treturn {\n\t\t\t\t\"pred_masks\": pred_masks,\n\t\t\t\t\"gt_masks\": gt_masks,\n\t\t\t\t\"pred_logits\": pred_logits,\n\t\t\t\t\"pred_boxes\": pred_boxes,\n\t\t\t\t\"gt_bboxes\": bboxes_labels_list\n\t\t\t}\n\t\t\n\t\tnum_boxes = 0\n\t\tfor bboxes_labels, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):\n\t\t\tif bboxes_valid:\n\t\t\t\tnum_boxes += len(bboxes_labels)\n\t\tnum_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=image_embeddings.device)\n\t\tnum_boxes = torch.clamp(num_boxes, min=1).item()\n\t\t\n\t\tdetection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}\n\n\t\ttarget_det = []\n\t\tall_bboxes_valid = []\n\t\tfor bboxes_label, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):\n\t\t\ttarget_det.append({\"labels\":torch.zeros(len(bboxes_label)).to(bboxes_label.device, torch.long), \"boxes\":bboxes_label})\n\t\t\tif bboxes_valid:\n\t\t\t\tall_bboxes_valid.append(torch.ones((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))\n\t\t\telse:\n\t\t\t\tall_bboxes_valid.append(torch.zeros((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))\n\t\tall_bboxes_valid = torch.cat(all_bboxes_valid, 0)\n\t\t\n\t\tloss_dict = self.model.owlvit.criterion(detection_result_batch, target_det, num_boxes)\n\n\t\tfor loss_k, loss_v in loss_dict.items():\n\t\t\tif \"loss_ce\" in loss_k:\n\t\t\t\tloss_dict[loss_k] = (loss_v*bboxes_valid_list.unsqueeze(-1)).mean()\n\t\t\telse:\n\t\t\t\tloss_dict[loss_k] = (loss_v*all_bboxes_valid).sum()\n\n\t\tweight_dict = self.model.owlvit.criterion.weight_dict\n\t\tdetection_loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)\n\t\tdetection_loss = detection_loss*self.det_loss_weight\n\n\t\tmodel_output = output\n\t\toutput = model_output.logits\n\n\t\tce_loss = model_output.loss\n\t\tce_loss = ce_loss * self.ce_loss_weight\n\t\tmask_bce_loss = 0\n\t\tmask_dice_loss = 0\n\t\tnum_masks = 0\n\t\tfor batch_idx in range(len(pred_masks)):\n\t\t\tgt_mask = gt_masks[batch_idx]\n\t\t\tpred_mask = pred_masks[batch_idx]\n\t\t\tmasks_valid = masks_valid_list[batch_idx]\n\n\t\t\tmask_bce_loss += (\n\t\t\t\tsigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])\n\t\t\t\t* gt_mask.shape[0] * masks_valid\n\t\t\t).sum()\n\t\t\tmask_dice_loss += (\n\t\t\t\tdice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])\n\t\t\t\t* gt_mask.shape[0] * masks_valid\n\t\t\t).sum()\n\t\t\tnum_masks += masks_valid.sum()\n\n\t\tmask_bce_loss = self.bce_loss_weight * mask_bce_loss / (num_masks + 1e-8)\n\t\tmask_dice_loss = self.dice_loss_weight * mask_dice_loss / (num_masks + 1e-8)\n\t\tmask_loss = mask_bce_loss + mask_dice_loss\n\n\t\tloss = ce_loss + mask_loss + detection_loss\n\n\t\treturn {\n\t\t\t\"loss\": loss,\n\t\t\t\"ce_loss\": ce_loss,\n\t\t\t\"mask_bce_loss\": mask_bce_loss,\n\t\t\t\"mask_dice_loss\": mask_dice_loss,\n\t\t\t\"mask_loss\": mask_loss,\n\t\t\t\"detection_loss\": detection_loss,\n\t\t\t\"detection_loss_ce\": loss_dict['loss_ce'],\n\t\t\t\"detection_loss_bbox\": loss_dict['loss_bbox'],\n\t\t\t\"detection_loss_giou\": loss_dict['loss_giou'],\n\t\t}\n\n\tdef inference(\n\t\tself,\n\t\timages_clip,\n\t\timages,\n\t\tinput_ids,\n\t\tresize_list,\n\t\toriginal_size_list,\n\t\tmax_new_tokens=32,\n\t\ttokenizer=None,\n\t\tmode = 'vqa'\n\t):\n\t\tassert mode in ['vqa', 'segmentation', 'detection']\n\t\twith 
torch.no_grad():\n\t\t\toutputs = self.generate(\n\t\t\t\timages=images_clip,\n\t\t\t\tinput_ids=input_ids,\n\t\t\t\tmax_new_tokens=max_new_tokens,\n\t\t\t\tnum_beams=1,\n\t\t\t\toutput_hidden_states=True,\n\t\t\t\treturn_dict_in_generate=True,\n\t\t\t)\n\t\t\toutput_hidden_states = outputs.hidden_states[-1]\n\t\t\toutput_ids = outputs.sequences\n\n\t\t\tif mode == 'vqa':\n\t\t\t\treturn output_ids, None, None\n\n\t\t\tloc_token_mask = output_ids[:, 1:] == self.loc_token_idx\n\t\t\t# hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)\n\t\t\tloc_token_mask = torch.cat(\n\t\t\t\t[\n\t\t\t\t\ttorch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(),\n\t\t\t\t\tloc_token_mask,\n\t\t\t\t],\n\t\t\t\tdim=1,\n\t\t\t)\n\n\t\t\t# seg\n\t\t\thidden_states_seg = []\n\t\t\tassert len(self.model.text_hidden_fcs_seg) == 1\n\t\t\thidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states))\n\n\t\t\tlast_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)\n\n\t\t\t# det\n\t\t\thidden_states_det = []\n\n\t\t\tassert len(self.model.text_hidden_fcs_det) == 1\n\t\t\thidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states))\n\t\t\tlast_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)\n\n\t\t\tpred_embeddings_seg = last_hidden_state_seg[loc_token_mask]\n\t\t\tpred_embeddings_det = last_hidden_state_det[loc_token_mask]\n\t\t\tloc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]\n\n\t\t\tloc_token_offset = loc_token_counts.cumsum(-1)\n\t\t\tloc_token_offset = torch.cat(\n\t\t\t\t[torch.zeros(1).long().cuda(), loc_token_offset], dim=0\n\t\t\t)\n\n\n\t\t\tpred_embeddings_seg_ = []\n\t\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\t\tpred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])\n\t\t\tpred_embeddings_seg = pred_embeddings_seg_\n\n\t\t\tpred_embeddings_det_ = []\n\t\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\t\tpred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])\n\t\t\tpred_embeddings_det = pred_embeddings_det_\n\n\t\t\timage_embeddings = self.get_visual_embs(images)\n\n\t\t\tmultimask_output = False\n\t\t\tpred_masks = []\n\t\t\tfor i in range(len(pred_embeddings_seg)):\n\t\t\t\t(\n\t\t\t\t\tsparse_embeddings,\n\t\t\t\t\tdense_embeddings,\n\t\t\t\t) = self.model.prompt_encoder(\n\t\t\t\t\tpoints=None,\n\t\t\t\t\tboxes=None,\n\t\t\t\t\tmasks=None,\n\t\t\t\t\ttext_embeds=pred_embeddings_seg[i].unsqueeze(1),\n\t\t\t\t)\n\n\t\t\t\tsparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)\n\t\t\t\tlow_res_masks, iou_predictions = self.model.mask_decoder(\n\t\t\t\t\timage_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),\n\t\t\t\t\timage_pe=self.model.prompt_encoder.get_dense_pe(),\n\t\t\t\t\tsparse_prompt_embeddings=sparse_embeddings,\n\t\t\t\t\tdense_prompt_embeddings=dense_embeddings,\n\t\t\t\t\tmultimask_output=multimask_output,\n\t\t\t\t)\n\t\t\t\tpred_mask = F.interpolate(\n\t\t\t\tlow_res_masks.float(), original_size_list[i], mode=\"bilinear\", align_corners=False\n\t\t\t)\n\t\t\t\tpred_masks.append(pred_mask[:, 0])\n\n\t\t\tif mode == 'segmentation':\n\t\t\t\treturn None, pred_masks, None\n\n\t\t\t# detection model\n\t\t\tdetection_result_batch = []\n\t\t\tfor i in range(len(pred_embeddings_det)):\n\t\t\t\tbs = 
pred_embeddings_det[i].shape[0]\n\t\t\t\tdetection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))\n\t\t\t\tdetection_result_batch.append(detection_result)\n\n\n\t\t\tpred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 0)\n\t\t\tpred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)\n\t\t\tdetection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}\n\n\t\treturn None, pred_masks, detection_result_batch" }, { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "HybridDataset", "path": "VisualSearch/utils/dataset.py", "snippet": "class HybridDataset(torch.utils.data.Dataset):\n\tpixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n\tpixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n\timg_size = 1024\n\tignore_label = 255\n\n\tdef __init__(\n\t\tself,\n\t\tbase_dir,\n\t\ttokenizer,\n\t\tvision_tower,\n\t\tsamples_per_epoch=500 * 8 * 2 * 10,\n\t\tprecision: str = \"fp32\",\n\t\tnum_classes_per_sample: int = 3,\n\t\texclude_val=False,\n\t\tdataset=\"general_segdet||refer_seg||vqa||reason_seg\",\n\t\tsample_rate=[9, 3, 3, 1],\n\t\tgeneral_segdet_data=\"objects365||cocostuff||paco_lvis\",\n\t\tgeneral_segdet_sample_rate=[2,1,1],\n\t\trefer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n\t\tvqa_data=\"possible_locations_conv_86k||llava_instruct_80k\",\n\t\tvqa_sample_rate=[2,1],\n\t):\n\t\tself.exclude_val = exclude_val\n\t\tself.dataset = dataset\n\t\tself.samples_per_epoch = samples_per_epoch\n\t\tself.num_classes_per_sample = num_classes_per_sample\n\t\tsample_rate = np.array(sample_rate)\n\t\tself.sample_rate = sample_rate / sample_rate.sum()\n\n\t\tself.base_dir = base_dir\n\t\tself.tokenizer = tokenizer\n\t\tself.precision = precision\n\n\t\tself.datasets = dataset.split(\"||\")\n\n\t\tself.all_datasets = []\n\t\tfor dataset in self.datasets:\n\t\t\tif dataset == \"general_segdet\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tSegDetDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\tgeneral_segdet_data,\n\t\t\t\t\t\tgeneral_segdet_sample_rate,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == \"refer_seg\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tReferSegDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\trefer_seg_data,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == 
\"vqa\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tVQADataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\tvqa_data,\n\t\t\t\t\t\tvqa_sample_rate,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == \"mixed_grounding\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tMixedGroundingDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t)\n\t\t\t\t)\n\n\tdef __len__(self):\n\t\treturn self.samples_per_epoch\n\n\tdef __getitem__(self, idx):\n\t\tind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate)\n\t\tdata = self.all_datasets[ind]\n\t\tinference = False\n\t\treturn *data[0], inference" }, { "identifier": "ValDataset", "path": "VisualSearch/utils/dataset.py", "snippet": "class ValDataset(torch.utils.data.Dataset):\n\tpixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n\tpixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n\timg_size = 1024\n\tignore_label = 255\n\n\tdef __init__(\n\t\tself,\n\t\tbase_dir,\n\t\ttokenizer,\n\t\tvision_tower,\n\t\tval_dataset,\n\t):\n\t\tself.base_dir = base_dir\n\t\tsplits = val_dataset.split(\"|\")\n\t\tif len(splits) == 2:\n\t\t\tds, split = splits\n\t\t\timages = glob.glob(\n\t\t\t\tos.path.join(self.base_dir, \"reason_seg\", ds, split, \"*.jpg\")\n\t\t\t)\n\t\t\tself.images = images\n\t\t\tself.data_type = \"reason_seg\"\n\t\telif len(splits) == 3:\n\t\t\tself.base_dir = os.path.join(self.base_dir, 'refer_seg')\n\t\t\tds, splitBy, split = splits\n\t\t\trefer_api = REFER(self.base_dir, ds, splitBy)\n\t\t\tref_ids_val = refer_api.getRefIds(split=split)\n\t\t\timages_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val)\n\t\t\trefs_val = refer_api.loadRefs(ref_ids=ref_ids_val)\n\t\t\trefer_seg_ds = {}\n\t\t\trefer_seg_ds[\"images\"] = []\n\t\t\tloaded_images = refer_api.loadImgs(image_ids=images_ids_val)\n\t\t\tfor item in loaded_images:\n\t\t\t\titem = item.copy()\n\t\t\t\tif ds == \"refclef\":\n\t\t\t\t\titem[\"file_name\"] = os.path.join(\n\t\t\t\t\t\tself.base_dir, \"images/saiapr_tc-12\", item[\"file_name\"]\n\t\t\t\t\t)\n\t\t\t\telif ds in [\"refcoco\", \"refcoco+\", \"refcocog\", \"grefcoco\"]:\n\t\t\t\t\titem[\"file_name\"] = os.path.join(\n\t\t\t\t\t\tself.base_dir,\n\t\t\t\t\t\t\"images/mscoco/images/train2014\",\n\t\t\t\t\t\titem[\"file_name\"],\n\t\t\t\t\t)\n\t\t\t\trefer_seg_ds[\"images\"].append(item)\n\t\t\trefer_seg_ds[\"annotations\"] = refer_api.Anns # anns_val\n\n\t\t\timg2refs = {}\n\t\t\tfor ref in refs_val:\n\t\t\t\timage_id = ref[\"image_id\"]\n\t\t\t\timg2refs[image_id] = img2refs.get(image_id, []) + [\n\t\t\t\t\tref,\n\t\t\t\t]\n\t\t\trefer_seg_ds[\"img2refs\"] = img2refs\n\t\t\tself.refer_seg_ds = refer_seg_ds\n\t\t\tself.data_type = \"refer_seg\"\n\n\t\tself.ds = ds\n\t\tself.tokenizer = tokenizer\n\t\tself.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n\t\tself.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n\tdef __len__(self):\n\t\tif self.data_type == \"refer_seg\":\n\t\t\treturn len(self.refer_seg_ds[\"images\"])\n\t\telse:\n\t\t\treturn len(self.images)\n\n\tdef preprocess(self, x: torch.Tensor) -> torch.Tensor:\n\t\t\"\"\"Normalize pixel values and pad to a square input.\"\"\"\n\t\t# Normalize colors\n\t\tx = 
(x - self.pixel_mean) / self.pixel_std\n\n\t\t# Pad\n\t\th, w = x.shape[-2:]\n\t\tpadh = self.img_size - h\n\t\tpadw = self.img_size - w\n\t\tx = F.pad(x, (0, padw, 0, padh))\n\t\treturn x\n\n\tdef __getitem__(self, idx):\n\t\tif self.data_type == \"refer_seg\":\n\t\t\trefer_seg_ds = self.refer_seg_ds\n\t\t\timages = refer_seg_ds[\"images\"]\n\t\t\tannotations = refer_seg_ds[\"annotations\"]\n\t\t\timg2refs = refer_seg_ds[\"img2refs\"]\n\n\t\t\timage_info = images[idx]\n\t\t\timage_path = image_info[\"file_name\"]\n\t\t\timage_id = image_info[\"id\"]\n\n\t\t\trefs = img2refs[image_id]\n\t\t\tif len(refs) == 0:\n\t\t\t\traise ValueError(\"image {} has no refs\".format(image_id))\n\n\t\t\tsents = []\n\t\t\tann_ids = []\n\t\t\tfor ref in refs:\n\t\t\t\tfor sent in ref[\"sentences\"]:\n\t\t\t\t\tsents.append(sent[\"sent\"].strip().lower())\n\t\t\t\t\tann_ids.append(ref[\"ann_id\"])\n\n\t\t\tsampled_sents = sents\n\t\t\tsampled_ann_ids = ann_ids\n\t\t\timage = cv2.imread(image_path)\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\t\tis_sentence = False\n\t\telse:\n\t\t\timage_path = self.images[idx]\n\t\t\timage = cv2.imread(image_path)\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\t\tjson_path = image_path.replace(\".jpg\", \".json\")\n\t\t\tmask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image)\n\t\t\tsampled_sents = [sampled_sents[0]]\n\n\t\tconversations = []\n\t\tconv = conversation_lib.default_conversation.copy()\n\t\ti = 0\n\t\twhile i < len(sampled_sents):\n\t\t\tconv.messages = []\n\t\t\ttext = sampled_sents[i].strip()\n\t\t\tif is_sentence:\n\t\t\t\tconv.append_message(\n\t\t\t\t\tconv.roles[0],\n\t\t\t\t\tDEFAULT_IMAGE_TOKEN\n\t\t\t\t\t+ \"\\n {} Please output segmentation mask.\".format(text),\n\t\t\t\t)\n\t\t\t\tconv.append_message(conv.roles[1], \"[LOC].\")\n\t\t\telse:\n\t\t\t\tconv.append_message(\n\t\t\t\t\tconv.roles[0],\n\t\t\t\t\tDEFAULT_IMAGE_TOKEN\n\t\t\t\t\t+ \"\\n Please locate the {} in this image.\".format(\n\t\t\t\t\t\ttext\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t\tconv.append_message(conv.roles[1], \"Sure, [LOC].\")\n\t\t\tconversations.append(conv.get_prompt())\n\t\t\ti += 1\n\n\t\t# preprocess image for clip\n\t\timage_clip = self.clip_image_processor.preprocess(\n\t\t\t\texpand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\t\toriginal_size = image.shape[:2]\n\n\t\timage = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n\t\tresize = image.shape[:2]\n\n\t\tif self.data_type == \"refer_seg\":\n\t\t\tmasks = []\n\t\t\tbboxes_labels = []\n\t\t\tfor i, ann_id in enumerate(sampled_ann_ids):\n\t\t\t\tann = annotations[ann_id]\n\t\t\t\tcur_bboxes = [ann['bbox']]\n\t\t\t\tcur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n\t\t\t\t# xywh to x1y1x2y2\n\t\t\t\tcur_bboxes[:, 2:] += cur_bboxes[:, :2]\n\t\t\t\tcur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n\t\t\t\tcur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n\t\t\t\tkeep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n\t\t\t\tcur_bboxes = cur_bboxes[keep]\n\t\t\t\tcur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n\t\t\t\tcur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n\t\t\t\tif len(cur_bboxes) == 0:\n\t\t\t\t\treturn self.__getitem__(0)\n\t\t\t\tbboxes_labels.append(cur_bboxes)\n\t\t\t\tif len(ann[\"segmentation\"]) == 0 and 
sampled_sents[i] != \"\":\n\t\t\t\t\tm = np.zeros((image_info[\"height\"], image_info[\"width\"], 1))\n\t\t\t\telse:\n\t\t\t\t\tif type(ann[\"segmentation\"][0]) == list: # polygon\n\t\t\t\t\t\trle = mask.frPyObjects(\n\t\t\t\t\t\t\tann[\"segmentation\"],\n\t\t\t\t\t\t\timage_info[\"height\"],\n\t\t\t\t\t\t\timage_info[\"width\"],\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\trle = ann[\"segmentation\"]\n\t\t\t\t\t\tfor i in range(len(rle)):\n\t\t\t\t\t\t\tif not isinstance(rle[i][\"counts\"], bytes):\n\t\t\t\t\t\t\t\trle[i][\"counts\"] = rle[i][\"counts\"].encode()\n\t\t\t\t\tm = mask.decode(rle)\n\t\t\t\tm = np.sum(\n\t\t\t\t\tm, axis=2\n\t\t\t\t) # sometimes there are multiple binary map (corresponding to multiple segs)\n\t\t\t\tm = m.astype(np.uint8) # convert to np.uint8\n\t\t\t\tmasks.append(m)\n\t\telse:\n\t\t\tmasks = [mask_json]\n\t\tbboxes_valid = [1]*len(bboxes_labels)\n\t\tmasks_valid = [1]*len(bboxes_labels)\n\t\tmasks = np.stack(masks, axis=0)\n\t\tmasks = torch.from_numpy(masks)\n\t\tlabels = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\t\tinference = True\n\n\t\treturn (\n\t\t\timage_path,\n\t\t\timage,\n\t\t\timage_clip,\n\t\t\tconversations,\n\t\t\tmasks,\n\t\t\tlabels,\n\t\t\tbboxes_labels,\n\t\t\tbboxes_valid,\n\t\t\tmasks_valid,\n\t\t\tresize,\n\t\t\tNone,\n\t\t\tNone,\n\t\t\tinference,\n\t\t)" }, { "identifier": "collate_fn", "path": "VisualSearch/utils/dataset.py", "snippet": "def collate_fn(\n\tbatch, tokenizer=None, conv_type=\"llava_v1\", use_mm_start_end=True, local_rank=-1\n):\n\timage_path_list = []\n\timages_list = []\n\timages_clip_list = []\n\tconversation_list = []\n\tmasks_list = []\n\tlabel_list = []\n\tbboxes_labels_list = []\n\tbboxes_valid_list = []\n\tmasks_valid_list = []\n\tresize_list = []\n\tquestions_list = []\n\tsampled_classes_list = []\n\toffset_list = [0]\n\tcnt = 0\n\tinferences = []\n\tfor (\n\t\timage_path,\n\t\timages,\n\t\timages_clip,\n\t\tconversations,\n\t\tmasks,\n\t\tlabel,\n\t\tbboxes_labels,\n\t\tbboxes_valid,\n\t\tmasks_valid,\n\t\tresize,\n\t\tquestions,\n\t\tsampled_classes,\n\t\tinference,\n\t) in batch:\n\t\timage_path_list.append(image_path)\n\t\timages_list.append(images)\n\t\timages_clip_list.append(images_clip)\n\t\tconversation_list.extend(conversations)\n\t\tlabel_list.append(label)\n\t\tmasks_list.append(masks.float())\n\t\tbboxes_labels_list.extend(bboxes_labels)\n\t\tbboxes_valid_list.extend(bboxes_valid)\n\t\tmasks_valid_list.append(torch.tensor(masks_valid))\n\t\tresize_list.append(resize)\n\t\tquestions_list.append(questions)\n\t\tsampled_classes_list.append(sampled_classes)\n\t\tcnt += len(conversations)\n\t\toffset_list.append(cnt)\n\t\tinferences.append(inference)\n\n\tif use_mm_start_end:\n\t\t# replace <image> token\n\t\tfor i in range(len(conversation_list)):\n\t\t\treplace_token = DEFAULT_IMAGE_TOKEN\n\t\t\treplace_token = (\n\t\t\t\tDEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN\n\t\t\t)\n\t\t\tconversation_list[i] = conversation_list[i].replace(\n\t\t\t\tDEFAULT_IMAGE_TOKEN, replace_token\n\t\t\t)\n\tinput_ids = [\n\t\ttokenizer_image_token(prompt, tokenizer, return_tensors=\"pt\")\n\t\tfor prompt in conversation_list\n\t]\n\tinput_ids = torch.nn.utils.rnn.pad_sequence(\n\t\tinput_ids, batch_first=True, padding_value=tokenizer.pad_token_id\n\t)\n\tattention_masks = input_ids.ne(tokenizer.pad_token_id)\n\n\tfor i in range(len(bboxes_valid_list)):\n\t\tbboxes_valid = bboxes_valid_list[i]\n\t\tattention_mask = attention_masks[i]\n\t\tif not 
bboxes_valid:\n\t\t\tattention_mask = attention_mask & input_ids[i].ne(tokenizer(\"[LOC]\", add_special_tokens=False).input_ids[0])\n\t\t\tattention_masks[i] = attention_mask\n\n\tconv = conversation_lib.default_conversation.copy()\n\ttargets = input_ids.clone()\n\n\tif conv_type == \"llava_v1\":\n\t\tsep = conv.sep + conv.roles[1] + \": \"\n\telse:\n\t\tsep = \"[/INST] \"\n\tfor conversation, target in zip(conversation_list, targets):\n\t\ttotal_len = int(target.ne(tokenizer.pad_token_id).sum())\n\n\t\trounds = conversation.split(conv.sep2)\n\t\tcur_len = 1\n\t\ttarget[:cur_len] = IGNORE_INDEX\n\t\tfor i, rou in enumerate(rounds):\n\t\t\tif rou == \"\":\n\t\t\t\tbreak\n\n\t\t\tparts = rou.split(sep)\n\t\t\t# if len(parts) != 2:\n\t\t\t# break\n\t\t\tassert len(parts) == 2, (len(parts), rou)\n\t\t\tparts[0] += sep\n\n\t\t\tif DEFAULT_IMAGE_TOKEN in conversation:\n\t\t\t\tround_len = len(tokenizer_image_token(rou, tokenizer))\n\t\t\t\tinstruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2\n\t\t\telse:\n\t\t\t\tround_len = len(tokenizer(rou).input_ids)\n\t\t\t\tinstruction_len = len(tokenizer(parts[0]).input_ids) - 2\n\n\t\t\ttarget[cur_len : cur_len + instruction_len] = IGNORE_INDEX\n\n\t\t\tcur_len += round_len\n\t\ttarget[cur_len:] = IGNORE_INDEX\n\n\t\tif False:\n\t\t\tz = target.clone()\n\t\t\tz = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z)\n\t\t\tif local_rank == 0:\n\t\t\t\tprint(\n\t\t\t\t\t\"conversation: \",\n\t\t\t\t\tconversation,\n\t\t\t\t\t\"tokenizer.decode(z): \",\n\t\t\t\t\ttokenizer.decode(z),\n\t\t\t\t)\n\n\t\tif cur_len < tokenizer.model_max_length:\n\t\t\tassert cur_len == total_len\n\n\tif inferences[0] == False:\n\t\ttruncate_len = tokenizer.model_max_length - 255\n\n\t\tif input_ids.shape[1] > truncate_len:\n\t\t\tinput_ids = input_ids[:, :truncate_len]\n\t\t\ttargets = targets[:, :truncate_len]\n\t\t\tattention_masks = attention_masks[:, :truncate_len]\n\n\treturn {\n\t\t\"image_paths\": image_path_list,\n\t\t\"images\": torch.stack(images_list, dim=0),\n\t\t\"images_clip\": torch.stack(images_clip_list, dim=0),\n\t\t\"input_ids\": input_ids,\n\t\t\"labels\": targets,\n\t\t\"bboxes_labels_list\": bboxes_labels_list,\n\t\t\"bboxes_valid_list\": torch.tensor(bboxes_valid_list),\n\t\t\"masks_valid_list\": masks_valid_list,\n\t\t\"attention_masks\": attention_masks,\n\t\t\"masks_list\": masks_list,\n\t\t\"label_list\": label_list,\n\t\t\"resize_list\": resize_list,\n\t\t\"offset\": torch.LongTensor(offset_list),\n\t\t\"questions_list\": questions_list,\n\t\t\"sampled_classes_list\": sampled_classes_list,\n\t\t\"inference\": inferences[0],\n\t\t\"conversation_list\": conversation_list,\n\t}" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "AverageMeter", "path": "VisualSearch/utils/utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=\":f\", summary_type=Summary.AVERAGE):\n self.name = name\n self.fmt = fmt\n self.summary_type = summary_type\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def all_reduce(self):\n device = \"cuda\" if 
torch.cuda.is_available() else \"cpu\"\n if isinstance(self.sum, np.ndarray):\n total = torch.tensor(\n self.sum.tolist()\n + [\n self.count,\n ],\n dtype=torch.float32,\n device=device,\n )\n else:\n total = torch.tensor(\n [self.sum, self.count], dtype=torch.float32, device=device\n )\n\n dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)\n if total.shape[0] > 2:\n self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item()\n else:\n self.sum, self.count = total.tolist()\n self.avg = self.sum / (self.count + 1e-5)\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n def summary(self):\n fmtstr = \"\"\n if self.summary_type is Summary.NONE:\n fmtstr = \"\"\n elif self.summary_type is Summary.AVERAGE:\n fmtstr = \"{name} {avg:.3f}\"\n elif self.summary_type is Summary.SUM:\n fmtstr = \"{name} {sum:.3f}\"\n elif self.summary_type is Summary.COUNT:\n fmtstr = \"{name} {count:.3f}\"\n else:\n raise ValueError(\"invalid summary type %r\" % self.summary_type)\n\n return fmtstr.format(**self.__dict__)" }, { "identifier": "ProgressMeter", "path": "VisualSearch/utils/utils.py", "snippet": "class ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print(\"\\t\".join(entries))\n\n def display_summary(self):\n entries = [\" *\"]\n entries += [meter.summary() for meter in self.meters]\n print(\" \".join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"" }, { "identifier": "Summary", "path": "VisualSearch/utils/utils.py", "snippet": "class Summary(Enum):\n NONE = 0\n AVERAGE = 1\n SUM = 2\n COUNT = 3" }, { "identifier": "dict_to_cuda", "path": "VisualSearch/utils/utils.py", "snippet": "def dict_to_cuda(input_dict):\n for k, v in input_dict.items():\n if isinstance(input_dict[k], torch.Tensor):\n input_dict[k] = v.cuda(non_blocking=True)\n elif (\n isinstance(input_dict[k], list)\n and len(input_dict[k]) > 0\n and isinstance(input_dict[k][0], torch.Tensor)\n ):\n input_dict[k] = [ele.cuda(non_blocking=True) for ele in v]\n return input_dict" }, { "identifier": "intersectionAndUnionGPU", "path": "VisualSearch/utils/utils.py", "snippet": "def intersectionAndUnionGPU(output, target, K, ignore_index=255):\n # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.\n assert output.dim() in [1, 2, 3]\n assert output.shape == target.shape\n output = output.view(-1)\n target = target.view(-1)\n output[target == ignore_index] = ignore_index\n intersection = output[output == target]\n area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1)\n area_output = torch.histc(output, bins=K, min=0, max=K - 1)\n area_target = torch.histc(target, bins=K, min=0, max=K - 1)\n area_union = area_output + area_target - area_intersection\n return area_intersection, area_union, area_target" } ]
import argparse
import os
import shutil
import sys
import time

import deepspeed
import torch
import tqdm
import transformers
from functools import partial
from peft import LoraConfig, get_peft_model
from torch.utils.tensorboard import SummaryWriter

from VisualSearch.model.VSM import VSMForCausalLM
from VisualSearch.model.llava import conversation as conversation_lib
from VisualSearch.utils.dataset import HybridDataset, ValDataset, collate_fn
from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
                                      AverageMeter, ProgressMeter, Summary,
                                      dict_to_cuda, intersectionAndUnionGPU)
11396
"--version", default="LLaVA-7B-v1.1" ) parser.add_argument( "--precision", default="bf16", type=str, choices=["fp32", "bf16", "fp16"], help="precision for training", ) parser.add_argument("--model_max_length", default=512, type=int) parser.add_argument("--lora_r", default=8, type=int) parser.add_argument( "--vision-tower", default="openai/clip-vit-large-patch14", type=str ) parser.add_argument("--load_in_8bit", action="store_true", default=False) parser.add_argument("--load_in_4bit", action="store_true", default=False) parser.add_argument( "--dataset", default="general_segdet||refer_seg||mixed_grounding||vqa", type=str ) parser.add_argument("--sample_rates", default="15,4,4,15", type=str) parser.add_argument( "--general_segdet_data", default="objects365||cocostuff||paco_lvis", type=str, ) parser.add_argument("--general_segdet_sample_rates", default="2,1,1", type=str) parser.add_argument( "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str ) parser.add_argument("--vqa_data", default="possible_locations_conv_86k||llava_instruct_80k", type=str) parser.add_argument("--vqa_sample_rates", default="2,1", type=str) parser.add_argument("--val_dataset", default="refcoco|unc|val", type=str) parser.add_argument("--dataset_dir", default="data", type=str) parser.add_argument("--log_base_dir", default="./runs", type=str) parser.add_argument("--exp_name", default="vsm", type=str) parser.add_argument("--epochs", default=40, type=int) parser.add_argument("--steps_per_epoch", default=2500, type=int) parser.add_argument( "--batch_size", default=4, type=int, help="batch size per device per step" ) parser.add_argument( "--grad_accumulation_steps", default=2, type=int, ) parser.add_argument("--val_batch_size", default=1, type=int) parser.add_argument("--workers", default=2, type=int) parser.add_argument("--lr", default=0.0001, type=float) parser.add_argument("--ce_loss_weight", default=1.0, type=float) parser.add_argument("--dice_loss_weight", default=0.5, type=float) parser.add_argument("--bce_loss_weight", default=2.0, type=float) parser.add_argument("--det_loss_weight", default=0.1, type=float) parser.add_argument("--lora_alpha", default=16, type=int) parser.add_argument("--lora_dropout", default=0.05, type=float) parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str) parser.add_argument("--explanatory", default=0.1, type=float) parser.add_argument("--beta1", default=0.9, type=float) parser.add_argument("--beta2", default=0.95, type=float) parser.add_argument("--num_classes_per_sample", default=3, type=int) parser.add_argument("--exclude_val", action="store_true", default=False) parser.add_argument("--no_eval", action="store_true", default=False) parser.add_argument("--out_dim", default=512, type=int) parser.add_argument("--weight", type=str) parser.add_argument("--resume", default="", type=str) parser.add_argument("--print_freq", default=1, type=int) parser.add_argument("--start_epoch", default=0, type=int) parser.add_argument("--gradient_checkpointing", action="store_true", default=True) parser.add_argument("--train_mask_decoder", action="store_true", default=True) parser.add_argument("--use_mm_start_end", action="store_true", default=True) parser.add_argument("--auto_resume", action="store_true", default=False) parser.add_argument( "--conv_type", default="llava_v1", type=str, choices=["llava_v1", "llava_llama_2"], ) return parser.parse_args(args) def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), 
(y_c + 0.5 * h)] return torch.stack(b, dim=1) def iou(bbox1, bbox2): x1 = max(bbox1[0], bbox2[0]) y1 = max(bbox1[1], bbox2[1]) x2 = min(bbox1[2], bbox2[2]) y2 = min(bbox1[3], bbox2[3]) w1 = bbox1[2] - bbox1[0] h1 = bbox1[3] - bbox1[1] w2 = bbox2[2] - bbox2[0] h2 = bbox2[3] - bbox2[1] inter_area = max(0, x2 - x1) * max(0, y2 - y1) return inter_area/(w1*h1+w2*h2-inter_area) def main(args): args = parse_args(args) args.log_dir = os.path.join(args.log_base_dir, args.exp_name) if args.local_rank == 0: os.makedirs(args.log_dir, exist_ok=True) writer = SummaryWriter(args.log_dir) else: writer = None # Create model tokenizer = transformers.AutoTokenizer.from_pretrained( args.version, cache_dir=None, model_max_length=args.model_max_length, padding_side="right", use_fast=False, ) tokenizer.pad_token = tokenizer.unk_token num_added_tokens = tokenizer.add_tokens("[LOC]") args.loc_token_idx = tokenizer("[LOC]", add_special_tokens=False).input_ids[0] if args.use_mm_start_end: tokenizer.add_tokens(
def parse_args(args): parser = argparse.ArgumentParser(description="VisualSearch Model Training") parser.add_argument("--local_rank", default=0, type=int, help="node rank") parser.add_argument( "--version", default="LLaVA-7B-v1.1" ) parser.add_argument( "--precision", default="bf16", type=str, choices=["fp32", "bf16", "fp16"], help="precision for training", ) parser.add_argument("--model_max_length", default=512, type=int) parser.add_argument("--lora_r", default=8, type=int) parser.add_argument( "--vision-tower", default="openai/clip-vit-large-patch14", type=str ) parser.add_argument("--load_in_8bit", action="store_true", default=False) parser.add_argument("--load_in_4bit", action="store_true", default=False) parser.add_argument( "--dataset", default="general_segdet||refer_seg||mixed_grounding||vqa", type=str ) parser.add_argument("--sample_rates", default="15,4,4,15", type=str) parser.add_argument( "--general_segdet_data", default="objects365||cocostuff||paco_lvis", type=str, ) parser.add_argument("--general_segdet_sample_rates", default="2,1,1", type=str) parser.add_argument( "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str ) parser.add_argument("--vqa_data", default="possible_locations_conv_86k||llava_instruct_80k", type=str) parser.add_argument("--vqa_sample_rates", default="2,1", type=str) parser.add_argument("--val_dataset", default="refcoco|unc|val", type=str) parser.add_argument("--dataset_dir", default="data", type=str) parser.add_argument("--log_base_dir", default="./runs", type=str) parser.add_argument("--exp_name", default="vsm", type=str) parser.add_argument("--epochs", default=40, type=int) parser.add_argument("--steps_per_epoch", default=2500, type=int) parser.add_argument( "--batch_size", default=4, type=int, help="batch size per device per step" ) parser.add_argument( "--grad_accumulation_steps", default=2, type=int, ) parser.add_argument("--val_batch_size", default=1, type=int) parser.add_argument("--workers", default=2, type=int) parser.add_argument("--lr", default=0.0001, type=float) parser.add_argument("--ce_loss_weight", default=1.0, type=float) parser.add_argument("--dice_loss_weight", default=0.5, type=float) parser.add_argument("--bce_loss_weight", default=2.0, type=float) parser.add_argument("--det_loss_weight", default=0.1, type=float) parser.add_argument("--lora_alpha", default=16, type=int) parser.add_argument("--lora_dropout", default=0.05, type=float) parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str) parser.add_argument("--explanatory", default=0.1, type=float) parser.add_argument("--beta1", default=0.9, type=float) parser.add_argument("--beta2", default=0.95, type=float) parser.add_argument("--num_classes_per_sample", default=3, type=int) parser.add_argument("--exclude_val", action="store_true", default=False) parser.add_argument("--no_eval", action="store_true", default=False) parser.add_argument("--out_dim", default=512, type=int) parser.add_argument("--weight", type=str) parser.add_argument("--resume", default="", type=str) parser.add_argument("--print_freq", default=1, type=int) parser.add_argument("--start_epoch", default=0, type=int) parser.add_argument("--gradient_checkpointing", action="store_true", default=True) parser.add_argument("--train_mask_decoder", action="store_true", default=True) parser.add_argument("--use_mm_start_end", action="store_true", default=True) parser.add_argument("--auto_resume", action="store_true", default=False) parser.add_argument( "--conv_type", default="llava_v1", 
type=str, choices=["llava_v1", "llava_llama_2"], ) return parser.parse_args(args) def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=1) def iou(bbox1, bbox2): x1 = max(bbox1[0], bbox2[0]) y1 = max(bbox1[1], bbox2[1]) x2 = min(bbox1[2], bbox2[2]) y2 = min(bbox1[3], bbox2[3]) w1 = bbox1[2] - bbox1[0] h1 = bbox1[3] - bbox1[1] w2 = bbox2[2] - bbox2[0] h2 = bbox2[3] - bbox2[1] inter_area = max(0, x2 - x1) * max(0, y2 - y1) return inter_area/(w1*h1+w2*h2-inter_area) def main(args): args = parse_args(args) args.log_dir = os.path.join(args.log_base_dir, args.exp_name) if args.local_rank == 0: os.makedirs(args.log_dir, exist_ok=True) writer = SummaryWriter(args.log_dir) else: writer = None # Create model tokenizer = transformers.AutoTokenizer.from_pretrained( args.version, cache_dir=None, model_max_length=args.model_max_length, padding_side="right", use_fast=False, ) tokenizer.pad_token = tokenizer.unk_token num_added_tokens = tokenizer.add_tokens("[LOC]") args.loc_token_idx = tokenizer("[LOC]", add_special_tokens=False).input_ids[0] if args.use_mm_start_end: tokenizer.add_tokens(
[DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
5
2023-12-15 14:58:24+00:00
16k
foocker/Bert-VITS2-Faster
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_spk: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = 
phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads 
model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * 
(z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n # torch.save(self.enc_p.state_dict(), 'enc_p.pth')\n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n # torch.save(self.sdp.state_dict(), 'sdp.pth')\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n # y_lenghts 变更了\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n \n def infer_export(\n self,\n path,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None):\n \n x_cp = torch.LongTensor(x.clone().cpu())\n x_lengths_cp = torch.LongTensor(x_lengths.clone().cpu())\n sid_cp = torch.LongTensor(sid.clone().cpu())\n tone_cp = torch.LongTensor(tone.clone().cpu())\n language_cp = torch.LongTensor(language.clone().cpu())\n bert_cp = bert.clone().cpu()\n ja_bert_cp = ja_bert.clone().cpu()\n en_bert_cp = en_bert.clone().cpu()\n \n 
exported_onnx_dir = \"onnx_exports\"\n if not os.path.exists(f'{exported_onnx_dir}/{path}'):\n os.makedirs(f'{exported_onnx_dir}/{path}', exist_ok=True)\n print(f'{exported_onnx_dir}/{path}')\n \n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n self.emb_g.cpu()\n torch.onnx.export(\n self.emb_g,\n (sid_cp),\n f\"{exported_onnx_dir}/{path}/emb.onnx\",\n input_names=[\"sid\"],\n output_names=[\"g\"],\n verbose=False,\n opset_version=17,\n \n )\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n self.emb_g.to('cuda')\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n self.enc_p.eval()\n self.enc_p.to('cpu')\n\n torch.onnx.export(\n self.enc_p,\n (x_cp, x_lengths_cp, tone_cp, language_cp, bert_cp, ja_bert_cp, en_bert_cp, g.cpu()),\n f\"{exported_onnx_dir}/{path}/enc.onnx\",\n input_names=[\n \"x\",\n \"x_lengths\",\n \"tone\",\n \"language\",\n \"bert\",\n \"ja_bert\",\n \"en_bert\",\n \"g\",\n ],\n output_names=[\"xout\", \"m_p\", \"logs_p\", \"x_mask\"],\n dynamic_axes={\n \"x\": [1],\n \"x_lengths\": [0],\n \"tone\": [1],\n \"language\": [1],\n \"bert\": [2],\n \"ja_bert\": [2],\n \"en_bert\": [2],\n \"xout\": [2],\n \"m_p\": [2],\n \"logs_p\": [2],\n \"x_mask\": [2],\n },\n verbose=False,\n opset_version=17,\n )\n\n self.enc_p.to('cuda')\n print('start sdp!')\n \n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n\n self.sdp.eval()\n self.sdp.to('cpu')\n self.dp.to('cpu')\n\n noise_scale_w = 0.8*torch.ones((1,), dtype=torch.float32)\n \n # \n # sdp_state_dict = self.sdp.state_dict()\n # torch.save(sdp_state_dict, 'sdp_weights.pth')\n \n torch.onnx.export(\n self.sdp,\n (x.cpu(), x_mask.cpu(), g.cpu(), noise_scale_w.cpu()),\n f\"{exported_onnx_dir}/{path}/sdp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\", \"noise_scale_w\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17\n )\n torch.onnx.export(\n self.dp,\n (x.cpu(), x_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17,\n )\n \n self.sdp.to('cuda')\n self.dp.to('cuda')\n \n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n \n z = self.flow(z_p, y_mask, g=g, reverse=True)\n self.flow.to(\"cpu\")\n torch.onnx.export(\n self.flow,\n (z_p.cpu(), y_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/flow.onnx\",\n input_names=[\"z_p\", \"y_mask\", \"g\"],\n output_names=[\"z\"],\n # dynamic_axes={\"z_p\": [0, 2], \"y_mask\": [0, 2], \"z\": [0, 2]},\n dynamic_axes={\"z_p\": [2], 
\"y_mask\": [2], \"z\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.flow.to(\"cuda\")\n \n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n self.dec.to('cpu')\n z_in = (z * y_mask)[:, :, :max_len]\n torch.onnx.export(\n self.dec,\n (z_in.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dec.onnx\",\n input_names=[\"z_in\", \"g\"],\n output_names=[\"o\"],\n # dynamic_axes={\"z_in\": [0, 2], \"o\": [0, 2]},\n dynamic_axes={\"z_in\": [2], \"o\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.dec.to('cuda')\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, 
dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
13,311
y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) # 多卡训练设置 backend = "nccl" if platform.system() == "Windows": backend = "gloo" dist.init_process_group( backend=backend, init_method="env://", # If Windows,switch to gloo backend. ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank]) net_d = DDP(net_d, device_ids=[local_rank]) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 
{global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, 
hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
loss_fm = feature_loss(fmap_r, fmap_g)
9
2023-12-18 09:53:41+00:00
16k
sinoyou/nelf-pro
nerfstudio/cameras/camera_paths.py
[ { "identifier": "camera_utils", "path": "nerfstudio/cameras/camera_utils.py", "snippet": "_EPS = np.finfo(float).eps * 4.0\n M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]\n K = np.array(\n [\n [m00 - m11 - m22, 0.0, 0.0, 0.0],\n [m01 + m10, m11 - m00 - m22, 0.0, 0.0],\n [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],\n [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],\n ]\n )\ndef unit_vector(data, axis: Optional[int] = None) -> np.ndarray:\ndef quaternion_from_matrix(matrix, isprecise: bool = False) -> np.ndarray:\ndef quaternion_slerp(quat0, quat1, fraction: float, spin: int = 0, shortestpath: bool = True) -> np.ndarray:\ndef quaternion_matrix(quaternion) -> np.ndarray:\ndef get_interpolated_poses(pose_a, pose_b, steps: int = 10) -> List[float]:\ndef get_interpolated_k(k_a, k_b, steps: int = 10) -> TensorType[3, 4]:\ndef get_interpolated_poses_many(\n poses: TensorType[\"num_poses\", 3, 4],\n Ks: TensorType[\"num_poses\", 3, 3],\n steps_per_transition=10,\n) -> Tuple[TensorType[\"num_poses\", 3, 4], TensorType[\"num_poses\", 3, 3]]:\ndef normalize(x) -> TensorType[...]:\ndef viewmatrix(lookat, up, pos) -> TensorType[...]:\ndef get_distortion_params(\n k1: float = 0.0,\n k2: float = 0.0,\n k3: float = 0.0,\n k4: float = 0.0,\n p1: float = 0.0,\n p2: float = 0.0,\n) -> TensorType[...]:\ndef _compute_residual_and_jacobian(\n x: torch.Tensor,\n y: torch.Tensor,\n xd: torch.Tensor,\n yd: torch.Tensor,\n distortion_params: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:\ndef radial_and_tangential_undistort(\n coords: torch.Tensor,\n distortion_params: torch.Tensor,\n eps: float = 1e-3,\n max_iterations: int = 10,\n) -> torch.Tensor:\ndef rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:\ndef auto_orient_and_center_poses(\n poses: TensorType[\"num_poses\":..., 4, 4], method: Literal[\"pca\", \"up\", \"none\"] = \"up\", center_poses: bool = True\n) -> TensorType[\"num_poses\":..., 3, 4]:" }, { "identifier": "get_interpolated_poses_many", "path": "nerfstudio/cameras/camera_utils.py", "snippet": "def get_interpolated_poses_many(\n poses: TensorType[\"num_poses\", 3, 4],\n Ks: TensorType[\"num_poses\", 3, 3],\n steps_per_transition=10,\n) -> Tuple[TensorType[\"num_poses\", 3, 4], TensorType[\"num_poses\", 3, 3]]:\n \"\"\"Return interpolated poses for many camera poses.\n\n Args:\n poses: list of camera poses\n Ks: list of camera intrinsics\n steps_per_transition: number of steps per transition\n\n Returns:\n tuple of new poses and intrinsics\n \"\"\"\n traj = []\n Ks_new = []\n for idx in range(poses.shape[0] - 1):\n pose_a = poses[idx]\n pose_b = poses[idx + 1]\n poses_ab = get_interpolated_poses(pose_a, pose_b, steps=steps_per_transition)\n traj += poses_ab\n Ks_new += get_interpolated_k(Ks[idx], Ks[idx + 1], steps_per_transition)\n return torch.tensor(traj), torch.tensor(Ks_new)" }, { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. 
Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n probe_config: dict config containing the generated probe information (core and basis)\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\":..., 1]]\n image_filenames: Optional[List[str]]\n probe_config: Optional[list]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n image_filenames: Optional[List[str]] = None,\n probe_config: Optional[list] = None\n ):\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n \n self.image_filenames = image_filenames\n self.probe_config = probe_config\n if self.probe_config is not None:\n self.probe = Probes(self.camera_to_worlds, self.probe_config)\n else:\n self.probe = None\n \n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n def _init_get_fc_xy(self, fc_xy, name):\n \"\"\"\n Parses the input focal length / principle point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principle point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times):\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self):\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self):\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n 
)\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. 
It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n\n directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)\n directions_norm = directions_norm[0]\n\n directions_stack = normalize(directions_stack, dim=-1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = 
torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n directions_norm=directions_norm,\n times=times,\n probes=self.probe,\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self,\n scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int],\n round_hw=False,\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n round_hw: Whether to round the height and width to the nearest integer.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n if not round_hw:\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * 
scaling_factor).to(torch.int64)\n else:\n self.height = torch.floor(self.height * scaling_factor + 0.5).to(torch.int64)\n self.width = torch.floor(self.width * scaling_factor + 0.5).to(torch.int64)\n\n def get_plotly(self, camera_group):\n\n # define local necssary coordinates for plotting\n num_cameras = self.camera_to_worlds.shape[0]\n _cam_center_c = np.array([[.0, .0, .0]]).repeat(num_cameras, axis=0)\n _cam_forward_c = np.array([[.0, .0, -1.0]]).repeat(num_cameras, axis=0)\n _cam_up_c = np.array([[.0, 1.0, .0]]).repeat(num_cameras, axis=0)\n _cam_right_c = np.array([[1.0, .0, .0]]).repeat(num_cameras, axis=0)\n\n _pyramid_width = self.width.cpu().numpy() / self.fx.cpu().numpy()\n _pyramid_height = self.height.cpu().numpy() / self.fy.cpu().numpy()\n\n _cam_pyramid_ur = np.concatenate([_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dr = np.concatenate([_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_ul = np.concatenate([-_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dl = np.concatenate([-_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n\n _local_coordinates = {\n 'center': _cam_center_c, \n 'forward': _cam_forward_c, \n 'up': _cam_up_c, \n 'right': _cam_right_c, \n 'pyramid_ur': _cam_pyramid_ur, \n 'pyramid_dr': _cam_pyramid_dr, \n 'pyramid_ul': _cam_pyramid_ul, \n 'pyramid_dl': _cam_pyramid_dl, \n }\n\n # transform it into world coordinates\n data = {}\n for k in _local_coordinates.keys():\n _local_coor_homo = np.concatenate([_local_coordinates[k].reshape(-1, 3) * plotly_camera_scale, np.ones((num_cameras, 1))], axis=-1) # num_cam, 4\n _cw = self.camera_to_worlds.cpu().numpy() # num_cam, 3, 4\n\n _homo = np.einsum('ijk,ik->ij', _cw, _local_coor_homo) # num_cam, 3\n data[k] = _homo[:, :3]\n\n plot_data = plot_camera_components(data, image_list=self.image_filenames, camera_group=camera_group)\n \n if isinstance(plot_data, list):\n return plot_data\n else:\n return [plot_data]" }, { "identifier": "three_js_perspective_camera_focal_length", "path": "nerfstudio/viewer/server/utils.py", "snippet": "def three_js_perspective_camera_focal_length(fov: float, image_height: int):\n \"\"\"Returns the focal length of a three.js perspective camera.\n\n Args:\n fov: the field of view of the camera in degrees.\n image_height: the height of the image in pixels.\n \"\"\"\n if fov is None:\n print(\"Warning: fov is None, using default value\")\n return 50\n pp_h = image_height / 2.0\n focal_length = pp_h / np.tan(fov * (np.pi / 180.0) / 2.0)\n return focal_length" } ]
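The Cameras snippet above turns pixel coordinates into per-ray directions by dividing out the intrinsics, appending a -1 z component, and rotating with the camera-to-world matrix. Below is a minimal sketch of that pinhole-ray math for a single camera; the function and variable names are illustrative and not part of the nerfstudio API.

```python
import torch
import torch.nn.functional as F

def perspective_ray_directions(y, x, fx, fy, cx, cy, c2w):
    """Pinhole-ray sketch: y, x are float pixel coords of shape (N,),
    fx, fy, cx, cy are scalar intrinsics, c2w is a (3, 4) camera-to-world."""
    # Camera-plane coordinates; note the sign flip on y and the -1 z component
    # (the camera looks down -z), as in the snippet above.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], dim=-1)         # (N, 2)
    dirs_cam = torch.cat([coord, -torch.ones_like(coord[..., :1])], -1)  # (N, 3)

    # Rotate into world space with the top-left 3x3 block of c2w, then
    # normalize so every direction has unit length.
    rotation = c2w[:3, :3]
    dirs_world = torch.sum(dirs_cam[..., None, :] * rotation, dim=-1)    # (N, 3)
    origins = c2w[:3, 3].expand_as(dirs_world)
    return origins, F.normalize(dirs_world, dim=-1)
```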
from typing import Any, Dict, Optional, Tuple
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
import numpy as np
import torch
import nerfstudio.utils.poses as pose_utils
12,430
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code for camera paths. """ def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras: """Generate a camera path between two cameras. Args: cameras: Cameras object containing intrinsics of all cameras. steps: The number of steps to interpolate between the two cameras. Returns: A new set of cameras along a path. """ Ks = cameras.get_intrinsics_matrices().cpu().numpy() poses = cameras.camera_to_worlds().cpu().numpy() poses, Ks = get_interpolated_poses_many(poses, Ks, steps_per_transition=steps) cameras = Cameras(fx=Ks[:, 0, 0], fy=Ks[:, 1, 1], cx=Ks[0, 0, 2], cy=Ks[0, 1, 2], camera_to_worlds=poses) return cameras # def get_path_from_json(camera_path, probe_config) -> Cameras: # data = camera_path # fl_x = data["fx"] # fl_y = data["fy"] # cx = data["cx"] # cy = data["cy"] # width = data["w"] # height = data["h"] # transforms = [x['transform_matrix'] for x in data['frames']] # transforms = torch.tensor(transforms) # return Cameras(fx=fl_x, fy=fl_y, cx=cx, cy=cy, width=width, height=height, camera_to_worlds=transforms, probe_config=probe_config) def get_path_from_json(camera_path: Dict[str, Any], probe_config) -> Cameras: """Takes a camera path dictionary and returns a trajectory as a Camera instance. Args: camera_path: A dictionary of the camera path information coming from the viewer. Returns: A Cameras instance with the camera path. """ image_height = camera_path["render_height"] image_width = camera_path["render_width"] c2ws = [] fxs = [] fys = [] for camera in camera_path["camera_path"]: # pose c2w = torch.tensor(camera["camera_to_world"]).view(4, 4)[:3] c2ws.append(c2w) # field of view fov = camera["fov"]
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code for camera paths. """ def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras: """Generate a camera path between two cameras. Args: cameras: Cameras object containing intrinsics of all cameras. steps: The number of steps to interpolate between the two cameras. Returns: A new set of cameras along a path. """ Ks = cameras.get_intrinsics_matrices().cpu().numpy() poses = cameras.camera_to_worlds().cpu().numpy() poses, Ks = get_interpolated_poses_many(poses, Ks, steps_per_transition=steps) cameras = Cameras(fx=Ks[:, 0, 0], fy=Ks[:, 1, 1], cx=Ks[0, 0, 2], cy=Ks[0, 1, 2], camera_to_worlds=poses) return cameras # def get_path_from_json(camera_path, probe_config) -> Cameras: # data = camera_path # fl_x = data["fx"] # fl_y = data["fy"] # cx = data["cx"] # cy = data["cy"] # width = data["w"] # height = data["h"] # transforms = [x['transform_matrix'] for x in data['frames']] # transforms = torch.tensor(transforms) # return Cameras(fx=fl_x, fy=fl_y, cx=cx, cy=cy, width=width, height=height, camera_to_worlds=transforms, probe_config=probe_config) def get_path_from_json(camera_path: Dict[str, Any], probe_config) -> Cameras: """Takes a camera path dictionary and returns a trajectory as a Camera instance. Args: camera_path: A dictionary of the camera path information coming from the viewer. Returns: A Cameras instance with the camera path. """ image_height = camera_path["render_height"] image_width = camera_path["render_width"] c2ws = [] fxs = [] fys = [] for camera in camera_path["camera_path"]: # pose c2w = torch.tensor(camera["camera_to_world"]).view(4, 4)[:3] c2ws.append(c2w) # field of view fov = camera["fov"]
focal_length = three_js_perspective_camera_focal_length(fov, image_height)
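This next_line converts a field of view into a focal length using the three.js perspective-camera convention shown in the three_js_perspective_camera_focal_length snippet above. A quick stand-alone check of that formula, assuming fov is a vertical field of view in degrees:

```python
import math

def focal_from_fov(fov_degrees: float, image_height: int) -> float:
    # focal = (H / 2) / tan(fov / 2), with fov in degrees, matching the
    # three.js perspective-camera convention used by the snippet.
    half_height = image_height / 2.0
    return half_height / math.tan(math.radians(fov_degrees) / 2.0)

# A 90-degree vertical fov on a 720-pixel-tall render gives a 360-pixel focal
# length, since tan(45 degrees) == 1.
assert abs(focal_from_fov(90.0, 720) - 360.0) < 1e-9
```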
3
2023-12-15 20:07:22+00:00
16k
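The get_intrinsics_matrices and rescale_output_resolution methods in the Cameras snippet above amount to assembling a 3x3 pinhole matrix and scaling fx, fy, cx and cy (plus height and width) by a common factor. A small sketch with made-up numbers; the helper names are illustrative, not nerfstudio code.

```python
import torch

def pinhole_K(fx: float, fy: float, cx: float, cy: float) -> torch.Tensor:
    # 3x3 pinhole intrinsics, as assembled by get_intrinsics_matrices.
    return torch.tensor([[fx, 0.0, cx],
                         [0.0, fy, cy],
                         [0.0, 0.0, 1.0]])

def rescale(K: torch.Tensor, height: int, width: int, s: float):
    # Scaling the output resolution scales fx, fy, cx, cy (and H, W) by s,
    # mirroring rescale_output_resolution with round_hw=False.
    K_scaled = K.clone()
    K_scaled[:2] *= s
    return K_scaled, int(height * s), int(width * s)

K, h, w = rescale(pinhole_K(1000.0, 1000.0, 640.0, 360.0), 720, 1280, 0.5)
# fx and fy become 500, (cx, cy) becomes (320, 180), and the render is 640 x 360.
```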
amazon-science/c2f-seg
src/image_model.py
[ { "identifier": "VQModel", "path": "taming_src/taming_models.py", "snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.path.join(config.path, self.name)\n self.eps = 1e-6\n\n self.ddconfig = config.model['params']['ddconfig']\n n_embed = config.model['params']['n_embed']\n embed_dim = config.model['params']['embed_dim']\n \n self.encoder = Encoder(self.ddconfig).to(config.device)\n self.decoder = Decoder(self.ddconfig).to(config.device)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)\n self.quant_conv = torch.nn.Conv2d(self.ddconfig[\"z_channels\"], embed_dim, 1).to(config.device)\n # self.quant_proj = torch.nn.Linear(self.ddconfig[\"z_channels\"], embed_dim).to(config.device)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig[\"z_channels\"], 1).to(config.device)\n # self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig[\"z_channels\"]).to(config.device)\n\n def encode(self, x, mask=None):\n h = self.encoder(x) # dim=256\n h = self.quant_conv(h) # dim=256\n if mask is not None:\n mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),\n stride=int(mask.shape[2] / h.shape[2]))\n quant = quant * mask + h * (1 - mask)\n quant, emb_loss, info = self.quantize(h, mask)\n \n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant) # dim: 256\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, x, mask=None):\n quant, diff, _ = self.encode(x, mask) # quant dim: 256\n\n dec = self.decode(quant)\n return dec, diff\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def restore(self, ckpt_file, g_opt=None, d_opt=None):\n torch_init_model(self, ckpt_file, \"state_dict\")\n saving = torch.load(ckpt_file, map_location='cpu')\n if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:\n opt_state = saving['optimizer_states']\n g_opt.load_state_dict(opt_state[0])\n d_opt.load_state_dict(opt_state[1])\n print(f\"Restored from {ckpt_file}\")\n return g_opt, d_opt\n\n def save(self, prefix=None, g_opt=None, d_opt=None):\n if prefix is not None:\n save_path = self.m_path + \"_{}.pth\".format(prefix)\n else:\n save_path = self.m_path + \".pth\"\n\n print('\\nsaving {} {}...\\n'.format(self.name, prefix))\n all_saving = {'state_dict': self.state_dict(),\n 'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}\n torch.save(all_saving, save_path)" }, { "identifier": "MaskedTransformer", "path": "src/image_component.py", "snippet": "class MaskedTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n embedding_dim = config.n_embd\n num_embed = config.vocab_size+1\n self.conv_in = torch.nn.Conv2d(2048, embedding_dim//2, 3, padding=1)\n # z_embedding\n self.c_emb = nn.Embedding(num_embed, embedding_dim//4)\n self.z_emb = nn.Embedding(num_embed, embedding_dim//4)\n # posotion embedding\n self.pos_emb = nn.Embedding(config.sequence_length, embedding_dim)\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.dec = Transformer_Prediction(config)\n # z dec and m dec\n self.m_dec = nn.Linear(embedding_dim, num_embed, bias=False)\n # 
self.m_dec.weight = self.m_emb.weight\n self.m_bias = nn.Parameter(torch.zeros(num_embed))\n\n self.sequence_length = config.sequence_length\n self.apply(self._init_weights)\n self.config = config\n\n def forward(self, img_feat, c_idx, z_idx, mask=None):\n # img_feat: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n i_embeddings = self.conv_in(img_feat) # [B, 768//2-1, 16, 16]\n i_embeddings = i_embeddings.flatten(2).transpose(-2, -1)\n # c and z embedding\n c_embeddings = self.c_emb(c_idx) # [B, 256, D//4]\n z_embeddings = self.z_emb(z_idx) # [B, 256, D//4]\n token_embeddings = torch.cat([i_embeddings, c_embeddings, z_embeddings], dim=2) # [B, 256, D]\n # add positional embeddings\n n_tokens = token_embeddings.shape[1] # 16 * 16\n position_ids = torch.arange(n_tokens, dtype=torch.long, device=z_idx.device)\n position_ids = position_ids.unsqueeze(0).repeat(z_idx.shape[0], 1) # [B, 256, 1]\n position_embeddings = self.pos_emb(position_ids) # [B, 256, D]\n\n x = self.drop(token_embeddings + position_embeddings)\n\n batch_size = token_embeddings.shape[0]\n mask = torch.ones(batch_size, 1, n_tokens, n_tokens).cuda()\n\n for block in self.blocks:\n x = block(x, mask=mask)\n\n x = self.dec(x)\n logits_m = self.m_dec(x) + self.m_bias\n \n return logits_m\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)" }, { "identifier": "Resnet_Encoder", "path": "src/image_component.py", "snippet": "class Resnet_Encoder(nn.Module):\n def __init__(self):\n super(Resnet_Encoder, self).__init__()\n self.encoder = base_resnet()\n\n def forward(self, img):\n features = self.encoder(img)\n return features" }, { "identifier": "Refine_Module", "path": "src/image_component.py", "snippet": "class Refine_Module(nn.Module):\n def __init__(self):\n super(Refine_Module, self).__init__()\n dim = 256 + 2\n self.conv_adapter = torch.nn.Conv2d(2048, 2048, 1)\n self.conv_in = torch.nn.Conv2d(2048, 256, 3, padding=1)\n self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)\n self.bn1 = torch.nn.BatchNorm2d(dim)\n\n self.lay2 = torch.nn.Conv2d(dim, 128, 3, padding=1)\n self.bn2 = torch.nn.BatchNorm2d(128)\n\n self.lay3 = torch.nn.Conv2d(128, 64, 3, padding=1)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.adapter1 = torch.nn.Conv2d(1024, 128, 1)\n\n # visible mask branch\n self.lay4_vm = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_vm = torch.nn.BatchNorm2d(32)\n self.lay5_vm = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_vm = torch.nn.BatchNorm2d(16)\n self.adapter2_vm = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_vm = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_vm = torch.nn.Conv2d(16, 1, 3, padding=1)\n\n # amodal mask branch\n self.lay4_am = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_am = torch.nn.BatchNorm2d(32)\n self.lay5_am = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_am = torch.nn.BatchNorm2d(16)\n self.adapter2_am = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_am = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_am = torch.nn.Conv2d(16, 1, 3, padding=1)\n \n def get_attn_map(self, feature, guidance):\n b,c,h,w = guidance.shape\n q = torch.flatten(guidance, start_dim=2)\n v = torch.flatten(feature, start_dim=2)\n\n k = v * q\n k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6)\n attn = (k.transpose(-2, -1) @ v) / 1\n 
attn = F.softmax(attn, dim=-1)\n attn = attn.reshape(b, c, h, w)\n return attn\n \n def forward(self, features, coarse_mask):\n # features: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n # coarse_mask: [B, 1, 256, 256]\n feat = self.conv_adapter(features[-1])\n coarse_mask = F.interpolate(coarse_mask, scale_factor=(1/16))\n attn_map = self.get_attn_map(feat, coarse_mask)\n x = self.conv_in(feat)\n x = torch.cat((x, attn_map, coarse_mask), dim=1)\n x = F.relu(self.bn1(self.lay1(x)))\n x = F.relu(self.bn2(self.lay2(x)))\n \n cur_feat = self.adapter1(features[-2])\n x = cur_feat + x\n x = F.interpolate(x, size=(32, 32), mode=\"nearest\")\n x = F.relu(self.bn3(self.lay3(x)))\n\n # TODO: visible mask branch\n cur_feat_vm = self.adapter2_vm(features[-3])\n x_vm = cur_feat_vm + x\n x_vm = F.interpolate(x_vm, size=(64, 64), mode=\"nearest\")\n x_vm = F.relu(self.bn4_vm(self.lay4_vm(x_vm)))\n\n cur_feat_vm = self.adapter3_vm(features[-4])\n x_vm = cur_feat_vm + x_vm\n x_vm = F.interpolate(x_vm, size=(128, 128), mode=\"nearest\")\n x_vm = F.relu(self.bn5_vm(self.lay5_vm(x_vm)))\n \n x_vm = self.out_lay_vm(x_vm)\n\n # TODO: full mask branch\n cur_feat_am = self.adapter2_am(features[-3])\n x_am = cur_feat_am + x\n x_am = F.interpolate(x_am, size=(64, 64), mode=\"nearest\")\n x_am = F.relu(self.bn4_am(self.lay4_am(x_am)))\n\n cur_feat_am = self.adapter3_am(features[-4])\n x_am = cur_feat_am + x_am\n x_am = F.interpolate(x_am, size=(128, 128), mode=\"nearest\")\n x_am = F.relu(self.bn5_am(self.lay5_am(x_am)))\n \n x_am = self.out_lay_am(x_am)\n\n return x_vm, x_am" }, { "identifier": "VGG19", "path": "src/loss.py", "snippet": "class VGG19(torch.nn.Module):\n def __init__(self, pretrained=True, vgg_norm=False):\n super(VGG19, self).__init__()\n self.vgg_norm = vgg_norm\n features = models.vgg19(pretrained=pretrained).features\n self.relu1_1 = torch.nn.Sequential()\n self.relu1_2 = torch.nn.Sequential()\n\n self.relu2_1 = torch.nn.Sequential()\n self.relu2_2 = torch.nn.Sequential()\n\n self.relu3_1 = torch.nn.Sequential()\n self.relu3_2 = torch.nn.Sequential()\n self.relu3_3 = torch.nn.Sequential()\n self.relu3_4 = torch.nn.Sequential()\n\n self.relu4_1 = torch.nn.Sequential()\n self.relu4_2 = torch.nn.Sequential()\n self.relu4_3 = torch.nn.Sequential()\n self.relu4_4 = torch.nn.Sequential()\n\n self.relu5_1 = torch.nn.Sequential()\n self.relu5_2 = torch.nn.Sequential()\n self.relu5_3 = torch.nn.Sequential()\n self.relu5_4 = torch.nn.Sequential()\n\n for x in range(2):\n self.relu1_1.add_module(str(x), features[x])\n\n for x in range(2, 4):\n self.relu1_2.add_module(str(x), features[x])\n\n for x in range(4, 7):\n self.relu2_1.add_module(str(x), features[x])\n\n for x in range(7, 9):\n self.relu2_2.add_module(str(x), features[x])\n\n for x in range(9, 12):\n self.relu3_1.add_module(str(x), features[x])\n\n for x in range(12, 14):\n self.relu3_2.add_module(str(x), features[x])\n\n for x in range(14, 16):\n self.relu3_3.add_module(str(x), features[x])\n\n for x in range(16, 18):\n self.relu3_4.add_module(str(x), features[x])\n\n for x in range(18, 21):\n self.relu4_1.add_module(str(x), features[x])\n\n for x in range(21, 23):\n self.relu4_2.add_module(str(x), features[x])\n\n for x in range(23, 25):\n self.relu4_3.add_module(str(x), features[x])\n\n for x in range(25, 27):\n self.relu4_4.add_module(str(x), features[x])\n\n for x in range(27, 30):\n self.relu5_1.add_module(str(x), features[x])\n\n for x in range(30, 32):\n self.relu5_2.add_module(str(x), features[x])\n\n for x in range(32, 34):\n 
self.relu5_3.add_module(str(x), features[x])\n\n for x in range(34, 36):\n self.relu5_4.add_module(str(x), features[x])\n\n # don't need the gradients, just want the features\n for param in self.parameters():\n param.requires_grad = False\n\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n if self.vgg_norm:\n x = (x + 1) / 2 # -1~1 --> 0~1\n # 由0~1重新归一化\n mean = torch.as_tensor(self.mean, dtype=x.dtype, device=x.device)\n std = torch.as_tensor(self.std, dtype=x.dtype, device=x.device)\n x.sub_(mean[None,:, None, None]).div_(std[None,:, None, None])\n\n relu1_1 = self.relu1_1(x)\n relu1_2 = self.relu1_2(relu1_1)\n\n relu2_1 = self.relu2_1(relu1_2)\n relu2_2 = self.relu2_2(relu2_1)\n\n relu3_1 = self.relu3_1(relu2_2)\n relu3_2 = self.relu3_2(relu3_1)\n relu3_3 = self.relu3_3(relu3_2)\n relu3_4 = self.relu3_4(relu3_3)\n\n relu4_1 = self.relu4_1(relu3_4)\n relu4_2 = self.relu4_2(relu4_1)\n relu4_3 = self.relu4_3(relu4_2)\n relu4_4 = self.relu4_4(relu4_3)\n\n relu5_1 = self.relu5_1(relu4_4)\n relu5_2 = self.relu5_2(relu5_1)\n relu5_3 = self.relu5_3(relu5_2)\n relu5_4 = self.relu5_4(relu5_3)\n\n out = {\n 'relu1_1': relu1_1,\n 'relu1_2': relu1_2,\n\n 'relu2_1': relu2_1,\n 'relu2_2': relu2_2,\n\n 'relu3_1': relu3_1,\n 'relu3_2': relu3_2,\n 'relu3_3': relu3_3,\n 'relu3_4': relu3_4,\n\n 'relu4_1': relu4_1,\n 'relu4_2': relu4_2,\n 'relu4_3': relu4_3,\n 'relu4_4': relu4_4,\n\n 'relu5_1': relu5_1,\n 'relu5_2': relu5_2,\n 'relu5_3': relu5_3,\n 'relu5_4': relu5_4,\n }\n return out" }, { "identifier": "PerceptualLoss", "path": "src/loss.py", "snippet": "class PerceptualLoss(nn.Module):\n r\"\"\"\n Perceptual loss, VGG-based\n https://arxiv.org/abs/1603.08155\n https://github.com/dxyang/StyleTransfer/blob/master/utils.py\n \"\"\"\n\n def __init__(self, vgg, weights=[1.0, 1.0, 1.0, 1.0, 1.0], reduction='mean'):\n super(PerceptualLoss, self).__init__()\n # self.add_module('vgg', VGG19())\n self.vgg = vgg\n self.reduction = reduction\n self.criterion = torch.nn.L1Loss(reduction=reduction)\n self.weights = weights\n\n def __call__(self, x, y):\n # Compute features\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n\n if self.reduction == 'mean':\n content_loss = 0.0\n content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])\n content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])\n content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])\n content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])\n content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])\n elif self.reduction == 'none':\n content_loss = []\n content_loss.append(self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']))\n content_loss.append(self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']))\n content_loss.append(self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']))\n content_loss.append(self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']))\n content_loss.append(self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']))\n else:\n raise NotImplementedError\n\n return content_loss" }, { "identifier": "AdamW", "path": "utils/pytorch_optimization.py", "snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). 
Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(grad, alpha = 1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value = 1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n # p.data.addcdiv_(-step_size, exp_avg, denom)\n p.data.addcdiv_(exp_avg, denom, value = -step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha = -group[\"lr\"] * group[\"weight_decay\"])\n\n return loss" }, { "identifier": "get_linear_schedule_with_warmup", "path": "utils/pytorch_optimization.py", "snippet": "def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)" }, { "identifier": "torch_show_all_params", "path": "utils/utils.py", "snippet": "def torch_show_all_params(model):\n params = list(model.parameters())\n k = 0\n for i in params:\n l = 1\n for j in i.size():\n l *= j\n k = k + l\n return k" }, { "identifier": "torch_init_model", "path": "utils/utils.py", "snippet": "def torch_init_model(model, init_checkpoint, key):\n state_dict = torch.load(init_checkpoint, map_location='cpu')[key]\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='')\n \n print(\"missing keys:{}\".format(missing_keys))\n print('unexpected keys:{}'.format(unexpected_keys))\n print('error msgs:{}'.format(error_msgs))" }, { "identifier": "Config", "path": "utils/utils.py", "snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')" }, { "identifier": "evaluation_image", "path": "utils/evaluation.py", "snippet": "def evaluation_image(frame_pred, frame_label, counts, meta, save_dict=None):\n frame_pred = (frame_pred > 0.5).to(torch.int64)\n frame_label = frame_label.to(torch.int64)\n counts = counts.to(torch.int64)\n vm_no_crop_gt = meta[\"vm_no_crop_gt\"].squeeze().unsqueeze(0).to(torch.int64)\n frame_pred = frame_pred.unsqueeze(0)\n frame_label = frame_label.unsqueeze(0)\n\n iou_ = get_IoU(frame_pred, frame_label)\n invisible_iou_= iou(frame_pred - vm_no_crop_gt, frame_label - vm_no_crop_gt)\n if (frame_label - vm_no_crop_gt).sum()==0:\n counts-=1\n return iou_.sum(), invisible_iou_, counts" }, { "identifier": "CrossEntropyLoss", "path": "utils/loss.py", "snippet": "class CrossEntropyLoss(nn.Module):\n \"\"\"Cross entropy loss 
with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n - num_classes (int): number of classes\n - epsilon (float): weight\n - use_gpu (bool): whether to use gpu devices\n - label_smooth (bool): whether to apply label smoothing, if False, epsilon = 0\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, device=None, label_smooth=True):\n super(CrossEntropyLoss, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon if label_smooth else 0\n self.device = device\n if device is None:\n self.logsoftmax = nn.LogSoftmax(dim=1)\n else:\n self.logsoftmax = nn.LogSoftmax(dim=1).to(device)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n - inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n - targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.device is not None:\n targets = targets.to(self.device)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss" } ]
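The CrossEntropyLoss snippet above applies label smoothing, y = (1 - epsilon) * y + epsilon / K, before the usual negative log-likelihood. A minimal stand-alone version of the same idea (illustrative helper, not the repository's class):

```python
import torch
import torch.nn.functional as F

def label_smoothed_ce(logits: torch.Tensor, targets: torch.Tensor, epsilon: float = 0.1) -> torch.Tensor:
    """logits: (B, K) raw scores, targets: (B,) integer class ids."""
    num_classes = logits.size(-1)
    log_probs = F.log_softmax(logits, dim=-1)
    one_hot = F.one_hot(targets, num_classes).float()
    # Smooth the one-hot targets: y <- (1 - eps) * y + eps / K
    smoothed = (1.0 - epsilon) * one_hot + epsilon / num_classes
    # Sum over classes, average over the batch (same reduction as the snippet).
    return (-smoothed * log_probs).sum(dim=-1).mean()

loss = label_smoothed_ce(torch.randn(4, 10), torch.randint(0, 10, (4,)))
```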
import os
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torchvision import transforms
from taming_src.taming_models import VQModel
from src.image_component import MaskedTransformer, Resnet_Encoder, Refine_Module
from src.loss import VGG19, PerceptualLoss
from utils.pytorch_optimization import AdamW, get_linear_schedule_with_warmup
from utils.utils import torch_show_all_params, torch_init_model
from utils.utils import Config
from utils.evaluation import evaluation_image
from utils.loss import CrossEntropyLoss
from tqdm import tqdm
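The C2F_Seg model in the code that follows decodes latent tokens iteratively in a MaskGIT style: a cosine gamma schedule controls how many tokens remain masked after each step, and logits are truncated to their top-k entries before categorical sampling. A short sketch of those two pieces under the same conventions; the names here are illustrative.

```python
import math
import torch

def cosine_gamma(r: float) -> float:
    # Fraction of tokens left masked after completing fraction r of the
    # decoding steps, as in gamma_func(mode="cosine").
    return math.cos(r * math.pi / 2)

def top_k_logits(logits: torch.Tensor, k: int) -> torch.Tensor:
    # Keep only the k largest logits per position, set the rest to -inf
    # (same behaviour as the model's top_k_logits helper).
    v, _ = torch.topk(logits, k)
    out = logits.clone()
    out[out < v[..., [-1]]] = -float("inf")
    return out

# With T = 3 refinement steps and 256 latent tokens the raw schedule keeps
# floor(256 * cos(pi/6)) = 221 tokens masked after step 1, 128 after step 2
# and 0 after the last step; the model additionally clamps this count to stay
# between 1 and the number of still-unknown tokens.
T, n_tokens = 3, 256
mask_lens = [math.floor(n_tokens * cosine_gamma((t + 1) / T)) for t in range(T)]
assert mask_lens == [221, 128, 0]
```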
11,249
mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] (B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop_gt']) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop']) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) # visualization self.visualize(pred_vm, pred_fm, meta, mode, iter) loss_eval = self.loss_and_evaluation(pred_fm, meta, iter, mode, pred_vm=pred_vm) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def visualize(self, pred_vm, pred_fm, meta, mode, iteration): pred_fm = pred_fm.squeeze() pred_vm = pred_vm.squeeze() gt_vm = meta["vm_no_crop"].squeeze() gt_fm = meta["fm_no_crop"].squeeze() to_plot = torch.cat((pred_vm, pred_fm, gt_vm, gt_fm)).cpu().numpy() save_dir = os.path.join(self.root_path, '{}_samples'.format(mode)) image_id, anno_id= meta["img_id"], meta["anno_id"] plt.imsave("{}/{}_{}_{}.png".format(save_dir, iteration, int(image_id.item()), int(anno_id.item())), to_plot) # def visualize_crop(self, pred_vm, pred_fm, meta, mode, count, pred_fm_crop_old): # pred_fm = pred_fm.squeeze() # pred_vm = pred_vm.squeeze() # pred_fm_crop_old = pred_fm_crop_old.squeeze() # gt_vm = meta["vm_crop"].squeeze() # gt_fm = meta["fm_crop"].squeeze() # to_plot = 
torch.cat((pred_vm, gt_vm, pred_fm_crop_old, pred_fm, gt_fm)).cpu().numpy() # save_dir = os.path.join(self.root_path, '{}_samples'.format(mode)) # image_id, anno_id= meta["img_id"], meta["anno_id"] # plt.imsave("{}/{}_{}_{}_{}.png".format(save_dir, count, int(image_id.item()), int(anno_id.item()), "crop"), to_plot) def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. # cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
class C2F_Seg(nn.Module): def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}): super(C2F_Seg, self).__init__() self.config = config self.iteration = 0 self.sample_iter = 0 self.name = config.model_type # load g model for mask self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset))) self.g_path = os.path.join(g_path, self.g_config.model_type) self.root_path = config.path self.transformer_path = os.path.join(config.path, self.name) self.mode = mode self.save_eval_dict = save_eval_dict self.eps = 1e-6 self.train_sample_iters = config.train_sample_iters self.g_model = VQModel(self.g_config).to(config.device) self.img_encoder = Resnet_Encoder().to(config.device) self.refine_module = Refine_Module().to(config.device) self.transformer = MaskedTransformer(config).to(config.device) self.g_model.eval() self.refine_criterion = nn.BCELoss() self.criterion = CrossEntropyLoss(num_classes=config.vocab_size+1, device=config.device) if config.train_with_dec: if not config.gumbel_softmax: self.temperature = nn.Parameter(torch.tensor([config.tp], dtype=torch.float32), requires_grad=True).to(config.device) if config.use_vgg: vgg = VGG19(pretrained=True, vgg_norm=config.vgg_norm).to(config.device) vgg.eval() reduction = 'mean' if config.balanced_loss is False else 'none' self.perceptual_loss = PerceptualLoss(vgg, weights=config.vgg_weights, reduction=reduction).to(config.device) else: self.perceptual_loss = None if config.init_gpt_with_vqvae: self.transformer.z_emb.weight = self.g_model.quantize.embedding.weight if logger is not None: logger.info('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) logger.info('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) else: print('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) print('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) # loss no_decay = ['bias', 'ln1.bias', 'ln1.weight', 'ln2.bias', 'ln2.weight'] param_optimizer = self.transformer.named_parameters() param_optimizer_encoder = self.img_encoder.named_parameters() param_optimizer_refine= self.refine_module.named_parameters() optimizer_parameters = [ {'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])], 'weight_decay': 0.0}, {'params': [p for n, p in param_optimizer_encoder], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer_refine], 'weight_decay': config.weight_decay}, ] self.opt = AdamW(params=optimizer_parameters, lr=float(config.lr), betas=(config.beta1, config.beta2)) self.sche = get_linear_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_iters, num_training_steps=config.max_iters) self.rank = dist.get_rank() self.gamma = self.gamma_func(mode=config.gamma_mode) self.mask_token_idx = config.vocab_size self.choice_temperature = 4.5 self.Image_W = config.Image_W self.Image_H = config.Image_H self.patch_W = config.patch_W self.patch_H = config.patch_H @torch.no_grad() def encode_to_z(self, x, mask=None): if len(x.size())==5: x = x[0] quant_z, _, info = self.g_model.encode(x.float(), mask) # [B,D,H,W] indices = info[2].view(quant_z.shape[0], -1) # [B, L] return quant_z, indices def get_attn_map(self, feature, guidance): guidance = F.interpolate(guidance, scale_factor=(1/16)) b,c,h,w = guidance.shape q = torch.flatten(guidance, start_dim=2) v = torch.flatten(feature, start_dim=2) k = v * q k = 
k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6) attn = (k.transpose(-2, -1) @ v) / 1 attn = F.softmax(attn, dim=-1) attn = attn.reshape(b, c, h, w) return attn def get_losses(self, meta): self.iteration += 1 z_loss = 0 img_feat = self.img_encoder(meta['img_crop'].permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) r = np.maximum(self.gamma(np.random.uniform()), self.config.min_mask_rate) r = math.floor(r * tgt_indices.shape[1]) sample = torch.rand(tgt_indices.shape, device=tgt_indices.device).topk(r, dim=1).indices random_mask = torch.zeros(tgt_indices.shape, dtype=torch.bool, device=tgt_indices.device) random_mask.scatter_(dim=1, index=sample, value=True) # [B, L] # concat mask mask = random_mask masked_indices = self.mask_token_idx * torch.ones_like(tgt_indices, device=tgt_indices.device) # [B, L] z_indices = (~mask) * tgt_indices + mask * masked_indices # [B, L] logits_z = self.transformer(img_feat[-1], src_indices, z_indices, mask=None) target = tgt_indices z_loss = self.criterion(logits_z.view(-1, logits_z.size(-1)), target.view(-1)) with torch.no_grad(): logits_z = logits_z[..., :-1] logits_z = self.top_k_logits(logits_z, k=5) probs = F.softmax(logits_z, dim=-1) seq_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids.reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop.detach()) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop_gt']) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop']) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) logs = [ ("z_loss", z_loss.item()), ("loss_vm", loss_vm.item()), ("loss_fm", loss_fm.item()), ] return z_loss, loss_vm+loss_fm, logs def align_raw_size(self, full_mask, obj_position, vm_pad, meta): vm_np_crop = meta["vm_no_crop"].squeeze() H, W = vm_np_crop.shape[-2], vm_np_crop.shape[-1] bz, seq_len = full_mask.shape[:2] new_full_mask = torch.zeros((bz, seq_len, H, W)).to(torch.float32).cuda() if len(vm_pad.shape)==3: vm_pad = vm_pad[0] obj_position = obj_position[0] for b in range(bz): paddings = vm_pad[b] position = obj_position[b] new_fm = full_mask[ b, :, :-int(paddings[0]) if int(paddings[0]) !=0 else None, :-int(paddings[1]) if int(paddings[1]) !=0 else None ] vx_min = int(position[0]) vx_max = min(H, int(position[1])+1) vy_min = int(position[2]) vy_max = min(W, int(position[3])+1) resize = transforms.Resize([vx_max-vx_min, vy_max-vy_min]) try: new_fm = resize(new_fm) new_full_mask[b, :, vx_min:vx_max, vy_min:vy_max] = new_fm[0] except: new_fm = new_fm return new_full_mask def loss_and_evaluation(self, pred_fm, meta, iter, mode, pred_vm=None): loss_eval = {} pred_fm = pred_fm.squeeze() counts = meta["counts"].reshape(-1).to(pred_fm.device) fm_no_crop = meta["fm_no_crop"].squeeze() vm_no_crop = meta["vm_no_crop"].squeeze() pred_vm = pred_vm.squeeze() # post-process pred_fm = (pred_fm > 
0.5).to(torch.int64) pred_vm = (pred_vm > 0.5).to(torch.int64) iou, invisible_iou_, iou_count = evaluation_image((pred_fm > 0.5).to(torch.int64), fm_no_crop, counts, meta, self.save_eval_dict) loss_eval["iou"] = iou loss_eval["invisible_iou_"] = invisible_iou_ loss_eval["occ_count"] = iou_count loss_eval["iou_count"] = torch.Tensor([1]).cuda() pred_fm_post = pred_fm + vm_no_crop pred_fm_post = (pred_fm_post>0.5).to(torch.int64) iou_post, invisible_iou_post, iou_count_post = evaluation_image(pred_fm_post, fm_no_crop, counts, meta, self.save_eval_dict) loss_eval["iou_post"] = iou_post loss_eval["invisible_iou_post"] = invisible_iou_post return loss_eval def backward(self, loss=None): self.opt.zero_grad() loss.backward() self.opt.step() self.sche.step() def top_k_logits(self, logits, k): v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def batch_predict_maskgit(self, meta, iter, mode, T=3, start_iter=0): ''' :param x:[B,3,H,W] image :param c:[b,X,H,W] condition :param mask: [1,1,H,W] mask ''' self.sample_iter += 1 img_feat = self.img_encoder(meta['img_crop'].permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) # _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) masked_indices = self.mask_token_idx * torch.ones_like(src_indices, device=src_indices.device) # [B, L] unknown_number_in_the_beginning = torch.sum(masked_indices == self.mask_token_idx, dim=-1) # [B] gamma = self.gamma_func("cosine") cur_ids = masked_indices # [B, L] seq_out = [] mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] (B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. 
cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop_gt']) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop']) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) # visualization self.visualize(pred_vm, pred_fm, meta, mode, iter) loss_eval = self.loss_and_evaluation(pred_fm, meta, iter, mode, pred_vm=pred_vm) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def visualize(self, pred_vm, pred_fm, meta, mode, iteration): pred_fm = pred_fm.squeeze() pred_vm = pred_vm.squeeze() gt_vm = meta["vm_no_crop"].squeeze() gt_fm = meta["fm_no_crop"].squeeze() to_plot = torch.cat((pred_vm, pred_fm, gt_vm, gt_fm)).cpu().numpy() save_dir = os.path.join(self.root_path, '{}_samples'.format(mode)) image_id, anno_id= meta["img_id"], meta["anno_id"] plt.imsave("{}/{}_{}_{}.png".format(save_dir, iteration, int(image_id.item()), int(anno_id.item())), to_plot) # def visualize_crop(self, pred_vm, pred_fm, meta, mode, count, pred_fm_crop_old): # pred_fm = pred_fm.squeeze() # pred_vm = pred_vm.squeeze() # pred_fm_crop_old = pred_fm_crop_old.squeeze() # gt_vm = meta["vm_crop"].squeeze() # gt_fm = meta["fm_crop"].squeeze() # to_plot = torch.cat((pred_vm, gt_vm, pred_fm_crop_old, pred_fm, gt_fm)).cpu().numpy() # save_dir = os.path.join(self.root_path, '{}_samples'.format(mode)) # image_id, anno_id= meta["img_id"], meta["anno_id"] # plt.imsave("{}/{}_{}_{}_{}.png".format(save_dir, count, int(image_id.item()), int(anno_id.item()), "crop"), to_plot) def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. 
# cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
torch_init_model(self.transformer, transformer_path, 'model')
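For illustration, a minimal, self-contained sketch of the top_k_logits filtering used by get_losses and batch_predict_maskgit in the code above; the example tensor and the value of k are hypothetical, chosen only to show the effect of the -inf masking before softmax.

import torch

def top_k_logits(logits, k):
    # keep the k largest logits per row; anything smaller than the k-th value
    # becomes -inf, so a later softmax assigns it zero probability
    v, _ = torch.topk(logits, k)
    out = logits.clone()
    out[out < v[..., [-1]]] = -float('Inf')
    return out

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
print(top_k_logits(logits, k=2))   # keeps 2.0 and 1.0, masks the rest to -inf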
9
2023-12-21 04:25:47+00:00
16k
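The iterative decoding loop in the record above re-masks a shrinking fraction of tokens at each step. Below is a minimal sketch of that schedule, assuming the cosine gamma from gamma_func, T=3 steps as in batch_predict_maskgit, and a hypothetical sequence of 256 initially masked tokens; the real code additionally clamps mask_len between 1 and the number of still-unknown tokens.

import numpy as np

gamma = lambda r: np.cos(r * np.pi / 2)   # cosine schedule, as in gamma_func("cosine")
T = 3                                     # refinement steps (default of batch_predict_maskgit)
L = 256                                   # token sequence length (hypothetical)

for t in range(T):
    ratio = (t + 1) / T                   # fraction of decoding steps completed
    mask_ratio = gamma(ratio)             # fraction of the L tokens to re-mask
    mask_len = int(np.floor(L * mask_ratio))
    print(t, round(float(mask_ratio), 3), mask_len)   # 221 -> 128 -> 0 tokens re-masked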
MingtaoGuo/AnimateAnyone_unofficial
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
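The LitEma snippet quoted earlier in this record keeps a shadow copy of every trainable parameter. A minimal sketch of its update rule for a single tensor follows, with a hypothetical decay and toy values; the actual class also warms the decay up via min(decay, (1 + num_updates) / (10 + num_updates)).

import torch

decay = 0.9999
shadow = torch.zeros(3)            # EMA ("shadow") copy of one parameter
param = torch.ones(3)              # the live parameter after an optimizer step

one_minus_decay = 1.0 - decay
with torch.no_grad():
    # same in-place rule as LitEma.forward: shadow <- shadow - (1 - decay) * (shadow - param)
    shadow.sub_(one_minus_decay * (shadow - param))
print(shadow)                      # each entry is now 1e-4, i.e. 0.01% of the way toward param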
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
12316
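A minimal sketch of the "linear" schedule that make_beta_schedule (quoted earlier in this record) produces, and of the cumulative products that register_schedule in the code below stores as buffers; the constants are the defaults used by the DDPM constructor (1000 timesteps, linear_start=1e-4, linear_end=2e-2).

import numpy as np
import torch

n_timestep, linear_start, linear_end = 1000, 1e-4, 2e-2
# betas rise quadratically between linear_start and linear_end (linspace in sqrt-space, then squared)
betas = (torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2).numpy()

alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)              # \bar{alpha}_t, consumed by q_sample
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

print(betas[0], betas[-1])      # ~1e-4 and 0.02
print(alphas_cumprod[-1])       # a small number: at t = T the sample is almost pure noise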
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass
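The full module source that follows defines q_sample, the closed-form forward process x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps. A minimal sketch on toy tensors, with a hypothetical abar_t, shows that the two coefficients preserve unit variance for standard-normal inputs.

import torch

sqrt_abar_t = torch.tensor(0.9)                     # sqrt(alphas_cumprod[t]) at some hypothetical t
sqrt_one_minus_abar_t = (1 - sqrt_abar_t ** 2).sqrt()

x_start = torch.randn(2, 3, 8, 8)                   # toy batch standing in for latents/images
noise = torch.randn_like(x_start)
x_noisy = sqrt_abar_t * x_start + sqrt_one_minus_abar_t * noise   # same formula as q_sample

print(x_noisy.var().item())                         # ~1.0: 0.81 + 0.19 of unit-variance terms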
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']: xc = batch[cond_key] xc = rearrange(xc, 'b h w c -> b c h w') elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not 
self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass
elif isimage(xc):
gold_snippet_index: 4
created_at: 2023-12-16 03:31:33+00:00
level: 16k
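The q_sample method in the DDPM/LatentDiffusion code of the record above computes the standard closed-form forward-diffusion sample x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise from the precomputed sqrt_alphas_cumprod / sqrt_one_minus_alphas_cumprod buffers. The short sketch below only illustrates that single step under an assumed linear beta schedule; every name and default value in it is chosen for the example and is not taken from the record itself.

from typing import Optional

import torch


def q_sample_reference(x_start: torch.Tensor, t: torch.Tensor,
                       alphas_cumprod: torch.Tensor,
                       noise: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Sample x_t ~ q(x_t | x_0) in closed form for a batch of timesteps t."""
    if noise is None:
        noise = torch.randn_like(x_start)
    # Gather the per-sample cumulative alpha product and broadcast over (C, H, W).
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x_start.dim() - 1)))
    return a_bar.sqrt() * x_start + (1.0 - a_bar).sqrt() * noise


if __name__ == "__main__":
    betas = torch.linspace(1e-4, 2e-2, 1000)            # assumed linear beta schedule
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # cumulative product alpha_bar_t
    x0 = torch.randn(4, 3, 32, 32)                      # toy image batch
    t = torch.randint(0, 1000, (4,))                    # one timestep per sample
    x_t = q_sample_reference(x0, t, alphas_cumprod)
    print(x_t.shape)                                    # torch.Size([4, 3, 32, 32])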
repo_name: yasserben/CLOUDS
file_path: train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
12,300
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead.
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead.
writers[-1] = WandbWriter()
10
2023-12-15 15:40:58+00:00
16k
modelscope/scepter
scepter/modules/inference/diffusion_inference.py
[ { "identifier": "GaussianDiffusion", "path": "scepter/modules/model/network/diffusion/diffusion.py", "snippet": "class GaussianDiffusion(object):\n def __init__(self, sigmas, prediction_type='eps'):\n assert prediction_type in {'x0', 'eps', 'v'}\n self.sigmas = sigmas # noise coefficients\n self.alphas = torch.sqrt(1 - sigmas**2) # signal coefficients\n self.num_timesteps = len(sigmas)\n self.prediction_type = prediction_type\n\n def diffuse(self, x0, t, noise=None):\n \"\"\"\n Add Gaussian noise to signal x0 according to:\n q(x_t | x_0) = N(x_t | alpha_t x_0, sigma_t^2 I).\n \"\"\"\n noise = torch.randn_like(x0) if noise is None else noise\n xt = _i(self.alphas, t, x0) * x0 + _i(self.sigmas, t, x0) * noise\n return xt\n\n def denoise(self,\n xt,\n t,\n s,\n model,\n model_kwargs={},\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n cat_uc=False):\n \"\"\"\n Apply one step of denoising from the posterior distribution q(x_s | x_t, x0).\n Since x0 is not available, estimate the denoising results using the learned\n distribution p(x_s | x_t, \\hat{x}_0 == f(x_t)). # noqa\n \"\"\"\n s = t - 1 if s is None else s\n\n # hyperparams\n sigmas = _i(self.sigmas, t, xt)\n alphas = _i(self.alphas, t, xt)\n alphas_s = _i(self.alphas, s.clamp(0), xt)\n alphas_s[s < 0] = 1.\n sigmas_s = torch.sqrt(1 - alphas_s**2)\n\n # precompute variables\n betas = 1 - (alphas / alphas_s)**2\n coef1 = betas * alphas_s / sigmas**2\n coef2 = (alphas * sigmas_s**2) / (alphas_s * sigmas**2)\n var = betas * (sigmas_s / sigmas)**2\n log_var = torch.log(var).clamp_(-20, 20)\n\n # prediction\n if guide_scale is None:\n assert isinstance(model_kwargs, dict)\n out = model(xt, t=t, **model_kwargs)\n else:\n # classifier-free guidance (arXiv:2207.12598)\n # model_kwargs[0]: conditional kwargs\n # model_kwargs[1]: non-conditional kwargs\n assert isinstance(model_kwargs, list) and len(model_kwargs) == 2\n\n if guide_scale == 1.:\n out = model(xt, t=t, **model_kwargs[0])\n else:\n if cat_uc:\n\n def parse_model_kwargs(prev_value, value):\n if isinstance(value, torch.Tensor):\n prev_value = torch.cat([prev_value, value], dim=0)\n elif isinstance(value, dict):\n for k, v in value.items():\n prev_value[k] = parse_model_kwargs(\n prev_value[k], v)\n elif isinstance(value, list):\n for idx, v in enumerate(value):\n prev_value[idx] = parse_model_kwargs(\n prev_value[idx], v)\n return prev_value\n\n all_model_kwargs = copy.deepcopy(model_kwargs[0])\n for model_kwarg in model_kwargs[1:]:\n for key, value in model_kwarg.items():\n all_model_kwargs[key] = parse_model_kwargs(\n all_model_kwargs[key], value)\n all_out = model(xt.repeat(2, 1, 1, 1),\n t=t.repeat(2),\n **all_model_kwargs)\n y_out, u_out = all_out.chunk(2)\n else:\n y_out = model(xt, t=t, **model_kwargs[0])\n u_out = model(xt, t=t, **model_kwargs[1])\n out = u_out + guide_scale * (y_out - u_out)\n\n # rescale the output according to arXiv:2305.08891\n if guide_rescale is not None:\n assert guide_rescale >= 0 and guide_rescale <= 1\n ratio = (y_out.flatten(1).std(dim=1) /\n (out.flatten(1).std(dim=1) +\n 1e-12)).view((-1, ) + (1, ) * (y_out.ndim - 1))\n out *= guide_rescale * ratio + (1 - guide_rescale) * 1.0\n # compute x0\n if self.prediction_type == 'x0':\n x0 = out\n elif self.prediction_type == 'eps':\n x0 = (xt - sigmas * out) / alphas\n elif self.prediction_type == 'v':\n x0 = alphas * xt - sigmas * out\n else:\n raise NotImplementedError(\n f'prediction_type {self.prediction_type} not implemented')\n\n # restrict the range of x0\n if percentile 
is not None:\n # NOTE: percentile should only be used when data is within range [-1, 1]\n assert percentile > 0 and percentile <= 1\n s = torch.quantile(x0.flatten(1).abs(), percentile, dim=1)\n s = s.clamp_(1.0).view((-1, ) + (1, ) * (xt.ndim - 1))\n x0 = torch.min(s, torch.max(-s, x0)) / s\n elif clamp is not None:\n x0 = x0.clamp(-clamp, clamp)\n\n # recompute eps using the restricted x0\n eps = (xt - alphas * x0) / sigmas\n\n # compute mu (mean of posterior distribution) using the restricted x0\n mu = coef1 * x0 + coef2 * xt\n return mu, var, log_var, x0, eps\n\n def loss(self,\n x0,\n t,\n model,\n model_kwargs={},\n reduction='mean',\n noise=None,\n **kwargs):\n # hyperparams\n sigmas = _i(self.sigmas, t, x0)\n alphas = _i(self.alphas, t, x0)\n\n # diffuse and denoise\n if noise is None:\n noise = torch.randn_like(x0)\n xt = self.diffuse(x0, t, noise)\n out = model(xt, t=t, **model_kwargs, **kwargs)\n\n # mse loss\n target = {\n 'eps': noise,\n 'x0': x0,\n 'v': alphas * noise - sigmas * x0\n }[self.prediction_type]\n loss = (out - target).pow(2)\n if reduction == 'mean':\n loss = loss.flatten(1).mean(dim=1)\n return loss\n\n @torch.no_grad()\n def sample(self,\n noise,\n model,\n x=None,\n denoising_strength=1.0,\n refine_stage=False,\n refine_strength=0.0,\n model_kwargs={},\n condition_fn=None,\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n solver='euler_a',\n steps=20,\n t_max=None,\n t_min=None,\n discretization=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n show_progress=False,\n seed=-1,\n intermediate_callback=None,\n cat_uc=False,\n **kwargs):\n # sanity check\n assert isinstance(steps, (int, torch.LongTensor))\n assert t_max is None or (t_max > 0 and t_max <= self.num_timesteps - 1)\n assert t_min is None or (t_min >= 0 and t_min < self.num_timesteps - 1)\n assert discretization in (None, 'leading', 'linspace', 'trailing')\n assert discard_penultimate_step in (None, True, False)\n assert return_intermediate in (None, 'x0', 'xt')\n\n # function of diffusion solver\n solver_fn = {\n 'ddim': sample_ddim,\n 'euler_ancestral': sample_euler_ancestral,\n 'euler': sample_euler,\n 'heun': sample_heun,\n 'dpm2': sample_dpm_2,\n 'dpm2_ancestral': sample_dpm_2_ancestral,\n 'dpmpp_2s_ancestral': sample_dpmpp_2s_ancestral,\n 'dpmpp_2m': sample_dpmpp_2m,\n 'dpmpp_sde': sample_dpmpp_sde,\n 'dpmpp_2m_sde': sample_dpmpp_2m_sde,\n 'dpm2_karras': sample_dpm_2,\n 'dpm2_ancestral_karras': sample_dpm_2_ancestral,\n 'dpmpp_2s_ancestral_karras': sample_dpmpp_2s_ancestral,\n 'dpmpp_2m_karras': sample_dpmpp_2m,\n 'dpmpp_sde_karras': sample_dpmpp_sde,\n 'dpmpp_2m_sde_karras': sample_dpmpp_2m_sde\n }[solver]\n\n # options\n schedule = 'karras' if 'karras' in solver else None\n discretization = discretization or 'linspace'\n seed = seed if seed >= 0 else random.randint(0, 2**31)\n if isinstance(steps, torch.LongTensor):\n discard_penultimate_step = False\n if discard_penultimate_step is None:\n discard_penultimate_step = True if solver in (\n 'dpm2', 'dpm2_ancestral', 'dpmpp_2m_sde', 'dpm2_karras',\n 'dpm2_ancestral_karras', 'dpmpp_2m_sde_karras') else False\n\n # function for denoising xt to get x0\n intermediates = []\n\n def model_fn(xt, sigma):\n # denoising\n t = self._sigma_to_t(sigma).repeat(len(xt)).round().long()\n x0 = self.denoise(xt,\n t,\n None,\n model,\n model_kwargs,\n guide_scale,\n guide_rescale,\n clamp,\n percentile,\n cat_uc=cat_uc)[-2]\n\n # collect intermediate outputs\n if return_intermediate == 'xt':\n intermediates.append(xt)\n 
elif return_intermediate == 'x0':\n intermediates.append(x0)\n if intermediate_callback is not None:\n intermediate_callback(intermediates[-1])\n return x0\n\n # get timesteps\n if isinstance(steps, int):\n steps += 1 if discard_penultimate_step else 0\n t_max = self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n\n # discretize timesteps\n if discretization == 'leading':\n steps = torch.arange(t_min, t_max + 1,\n (t_max - t_min + 1) / steps).flip(0)\n elif discretization == 'linspace':\n steps = torch.linspace(t_max, t_min, steps)\n elif discretization == 'trailing':\n steps = torch.arange(t_max, t_min - 1,\n -((t_max - t_min + 1) / steps))\n else:\n raise NotImplementedError(\n f'{discretization} discretization not implemented')\n steps = steps.clamp_(t_min, t_max)\n steps = torch.as_tensor(steps,\n dtype=torch.float32,\n device=noise.device)\n\n # get sigmas\n sigmas = self._t_to_sigma(steps)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n t_enc = int(min(denoising_strength, 0.999) * len(steps))\n sigmas = sigmas[len(steps) - t_enc - 1:]\n if refine_strength > 0:\n t_refine = int(min(refine_strength, 0.999) * len(steps))\n if refine_stage:\n sigmas = sigmas[-t_refine:]\n else:\n sigmas = sigmas[:-t_refine + 1]\n # print(sigmas)\n if x is not None:\n noise = (x + noise * sigmas[0]) / torch.sqrt(1.0 + sigmas[0]**2.0)\n\n if schedule == 'karras':\n if sigmas[0] == float('inf'):\n sigmas = karras_schedule(\n n=len(steps) - 1,\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas[sigmas < float('inf')].max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([\n sigmas.new_tensor([float('inf')]), sigmas,\n sigmas.new_zeros([1])\n ])\n else:\n sigmas = karras_schedule(\n n=len(steps),\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas.max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n if discard_penultimate_step:\n sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])\n kwargs['seed'] = seed\n # sampling\n x0 = solver_fn(noise,\n model_fn,\n sigmas,\n show_progress=show_progress,\n **kwargs)\n return (x0, intermediates) if return_intermediate is not None else x0\n\n def _sigma_to_t(self, sigma):\n if sigma == float('inf'):\n t = torch.full_like(sigma, len(self.sigmas) - 1)\n else:\n log_sigmas = torch.sqrt(self.sigmas**2 /\n (1 - self.sigmas**2)).log().to(sigma)\n log_sigma = sigma.log()\n dists = log_sigma - log_sigmas[:, None]\n low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(\n max=log_sigmas.shape[0] - 2)\n high_idx = low_idx + 1\n low, high = log_sigmas[low_idx], log_sigmas[high_idx]\n w = (low - log_sigma) / (low - high)\n w = w.clamp(0, 1)\n t = (1 - w) * low_idx + w * high_idx\n t = t.view(sigma.shape)\n if t.ndim == 0:\n t = t.unsqueeze(0)\n return t\n\n def _t_to_sigma(self, t):\n t = t.float()\n low_idx, high_idx, w = t.floor().long(), t.ceil().long(), t.frac()\n log_sigmas = torch.sqrt(self.sigmas**2 /\n (1 - self.sigmas**2)).log().to(t)\n log_sigma = (1 - w) * log_sigmas[low_idx] + w * log_sigmas[high_idx]\n log_sigma[torch.isnan(log_sigma)\n | torch.isinf(log_sigma)] = float('inf')\n return log_sigma.exp()\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, steps):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n\n t_max = None\n t_min = None\n\n # discretization method\n discretization = 'trailing' if self.prediction_type == 'v' else 'leading'\n\n # timesteps\n if isinstance(steps, int):\n t_max = 
self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n steps = discretize_timesteps(t_max, t_min, steps, discretization)\n steps = torch.as_tensor(steps).round().long().flip(0).to(x0.device)\n # steps = torch.as_tensor(steps).round().long().to(x0.device)\n\n # self.alphas_bar = torch.cumprod(1 - self.sigmas ** 2, dim=0)\n # print('sigma: ', self.sigmas, len(self.sigmas))\n # print('alpha_bar: ', self.alphas_bar, len(self.alphas_bar))\n # print('steps: ', steps, len(steps))\n # sqrt_alphas_cumprod = torch.sqrt(self.alphas_bar).to(x0.device)[steps]\n # sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_bar).to(x0.device)[steps]\n\n sqrt_alphas_cumprod = self.alphas.to(x0.device)[steps]\n sqrt_one_minus_alphas_cumprod = self.sigmas.to(x0.device)[steps]\n # print('sigma: ', self.sigmas, len(self.sigmas))\n # print('alpha: ', self.alphas, len(self.alphas))\n # print('steps: ', steps, len(steps))\n\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) *\n noise)\n\n @torch.no_grad()\n def sample_img2img(self,\n x,\n noise,\n model,\n denoising_strength=1,\n model_kwargs={},\n condition_fn=None,\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n solver='euler_a',\n steps=20,\n t_max=None,\n t_min=None,\n discretization=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n show_progress=False,\n seed=-1,\n **kwargs):\n # sanity check\n assert isinstance(steps, (int, torch.LongTensor))\n assert t_max is None or (t_max > 0 and t_max <= self.num_timesteps - 1)\n assert t_min is None or (t_min >= 0 and t_min < self.num_timesteps - 1)\n assert discretization in (None, 'leading', 'linspace', 'trailing')\n assert discard_penultimate_step in (None, True, False)\n assert return_intermediate in (None, 'x0', 'xt')\n # function of diffusion solver\n solver_fn = {\n 'euler_ancestral': sample_img2img_euler_ancestral,\n 'euler': sample_img2img_euler,\n }[solver]\n # options\n schedule = 'karras' if 'karras' in solver else None\n discretization = discretization or 'linspace'\n seed = seed if seed >= 0 else random.randint(0, 2**31)\n if isinstance(steps, torch.LongTensor):\n discard_penultimate_step = False\n if discard_penultimate_step is None:\n discard_penultimate_step = True if solver in (\n 'dpm2', 'dpm2_ancestral', 'dpmpp_2m_sde', 'dpm2_karras',\n 'dpm2_ancestral_karras', 'dpmpp_2m_sde_karras') else False\n\n # function for denoising xt to get x0\n intermediates = []\n\n def get_scalings(sigma):\n c_out = -sigma\n c_in = 1 / (sigma**2 + 1.**2)**0.5\n return c_out, c_in\n\n def model_fn(xt, sigma):\n # denoising\n c_out, c_in = get_scalings(sigma)\n t = self._sigma_to_t(sigma).repeat(len(xt)).round().long()\n\n x0 = self.denoise(xt * c_in, t, None, model, model_kwargs,\n guide_scale, guide_rescale, clamp,\n percentile)[-2]\n # collect intermediate outputs\n if return_intermediate == 'xt':\n intermediates.append(xt)\n elif return_intermediate == 'x0':\n intermediates.append(x0)\n return xt + x0 * c_out\n\n # get timesteps\n if isinstance(steps, int):\n steps += 1 if discard_penultimate_step else 0\n t_max = self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n # discretize timesteps\n if discretization == 'leading':\n steps = torch.arange(t_min, t_max + 1,\n (t_max - t_min + 1) / steps).flip(0)\n elif discretization == 'linspace':\n steps = torch.linspace(t_max, t_min, 
steps)\n elif discretization == 'trailing':\n steps = torch.arange(t_max, t_min - 1,\n -((t_max - t_min + 1) / steps))\n else:\n raise NotImplementedError(\n f'{discretization} discretization not implemented')\n steps = steps.clamp_(t_min, t_max)\n steps = torch.as_tensor(steps, dtype=torch.float32, device=x.device)\n # get sigmas\n sigmas = self._t_to_sigma(steps)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n t_enc = int(min(denoising_strength, 0.999) * len(steps))\n sigmas = sigmas[len(steps) - t_enc - 1:]\n noise = x + noise * sigmas[0]\n\n if schedule == 'karras':\n if sigmas[0] == float('inf'):\n sigmas = karras_schedule(\n n=len(steps) - 1,\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas[sigmas < float('inf')].max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([\n sigmas.new_tensor([float('inf')]), sigmas,\n sigmas.new_zeros([1])\n ])\n else:\n sigmas = karras_schedule(\n n=len(steps),\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas.max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n if discard_penultimate_step:\n sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])\n\n # sampling\n x0 = solver_fn(noise,\n model_fn,\n sigmas,\n seed=seed,\n show_progress=show_progress,\n **kwargs)\n return (x0, intermediates) if return_intermediate is not None else x0" }, { "identifier": "noise_schedule", "path": "scepter/modules/model/network/diffusion/schedules.py", "snippet": "def noise_schedule(schedule='logsnr_cosine_interp',\n n=1000,\n zero_terminal_snr=False,\n **kwargs):\n # compute sigmas\n sigmas = {\n 'linear': linear_schedule,\n 'scaled_linear': scaled_linear_schedule,\n 'quadratic': quadratic_schedule,\n 'cosine': cosine_schedule,\n 'sigmoid': sigmoid_schedule,\n 'karras': karras_schedule,\n 'exponential': exponential_schedule,\n 'polyexponential': polyexponential_schedule,\n 'vp': vp_schedule,\n 'logsnr_cosine': logsnr_cosine_schedule,\n 'logsnr_cosine_shifted': logsnr_cosine_shifted_schedule,\n 'logsnr_cosine_interp': logsnr_cosine_interp_schedule\n }[schedule](n, **kwargs)\n\n # post-processing\n if zero_terminal_snr and sigmas.max() != 1.0:\n scale = (1.0 - sigmas.min()) / (sigmas.max() - sigmas.min())\n sigmas = sigmas.min() + scale * (sigmas - sigmas.min())\n return sigmas" }, { "identifier": "BACKBONES", "path": "scepter/modules/model/registry.py", "snippet": "BACKBONES = Registry('BACKBONES', build_func=build_model)" }, { "identifier": "EMBEDDERS", "path": "scepter/modules/model/registry.py", "snippet": "EMBEDDERS = Registry('EMBEDDERS', build_func=build_model)" }, { "identifier": "MODELS", "path": "scepter/modules/model/registry.py", "snippet": "MODELS = Registry('MODELS', build_func=build_model)" }, { "identifier": "TOKENIZERS", "path": "scepter/modules/model/registry.py", "snippet": "TOKENIZERS = Registry('TOKENIZER', build_func=build_model)" }, { "identifier": "TUNERS", "path": "scepter/modules/model/registry.py", "snippet": "TUNERS = Registry('TUNERS', build_func=build_model)" }, { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n support to parse json/dict/yaml_file of parameters.\n :param load: whether load parameters or not.\n :param cfg_dict: default None.\n :param cfg_level: default None, means the current cfg-level for recurrent cfg presentation.\n :param logger: logger instance for print the cfg log.\n one examples:\n import 
argparse\n parser = argparse.ArgumentParser(\n description=\"Argparser for Cate process:\\n\"\n )\n parser.add_argument(\n \"--stage\",\n dest=\"stage\",\n help=\"Running stage!\",\n default=\"train\",\n choices=[\"train\"]\n )\n\n cfg = Config(load=True, parser_ins=parser)\n '''\n # checking that the logger exists or not\n if logger is None:\n self.logger = StdMsg(name='Config')\n else:\n self.logger = logger\n self.cfg_dict = cfg_dict\n if load:\n if cfg_file is None:\n assert parser_ins is not None\n self.args = _parse_args(parser_ins)\n self.load_from_file(self.args.cfg_file)\n # os.environ[\"LAUNCHER\"] = self.args.launcher\n os.environ['DATA_ONLINE'] = str(self.args.data_online).lower()\n os.environ['SHARE_STORAGE'] = str(\n self.args.share_storage).lower()\n os.environ['ES_DEBUG'] = str(self.args.debug).lower()\n else:\n self.load_from_file(cfg_file)\n if 'ENV' not in self.cfg_dict:\n self.cfg_dict['ENV'] = {\n 'SEED': 2023,\n 'USE_PL': False,\n 'BACKEND': 'nccl',\n 'SYNC_BN': False,\n 'CUDNN_DETERMINISTIC': True,\n 'CUDNN_BENCHMARK': False\n }\n self.logger.info(\n f\"ENV is not set and will use default ENV as {self.cfg_dict['ENV']}; \"\n f'If want to change this value, please set them in your config.'\n )\n else:\n if 'SEED' not in self.cfg_dict['ENV']:\n self.cfg_dict['ENV']['SEED'] = 2023\n self.logger.info(\n f\"SEED is not set and will use default SEED as {self.cfg_dict['ENV']['SEED']}; \"\n f'If want to change this value, please set it in your config.'\n )\n os.environ['ES_SEED'] = str(self.cfg_dict['ENV']['SEED'])\n self._update_dict(self.cfg_dict)\n if load:\n self.logger.info(f'Parse cfg file as \\n {self.dump()}')\n\n def load_from_file(self, file_name):\n self.logger.info(f'Loading config from {file_name}')\n if file_name is None or not os.path.exists(file_name):\n self.logger.info(f'File {file_name} does not exist!')\n self.logger.warning(\n f\"Cfg file is None or doesn't exist, Skip loading config from {file_name}.\"\n )\n return\n if file_name.endswith('.json'):\n self.cfg_dict = self._load_json(file_name)\n self.logger.info(\n f'System take {file_name} as json, because we find json in this file'\n )\n elif file_name.endswith('.yaml'):\n self.cfg_dict = self._load_yaml(file_name)\n self.logger.info(\n f'System take {file_name} as yaml, because we find yaml in this file'\n )\n else:\n self.logger.info(\n f'No config file found! 
Because we do not find json or yaml in --cfg {file_name}'\n )\n\n def _update_dict(self, cfg_dict):\n def recur(key, elem):\n if type(elem) is dict:\n return key, Config(load=False,\n cfg_dict=elem,\n logger=self.logger)\n elif type(elem) is list:\n config_list = []\n for idx, ele in enumerate(elem):\n if type(ele) is str and ele[1:3] == 'e-':\n ele = float(ele)\n config_list.append(ele)\n elif type(ele) is str:\n config_list.append(ele)\n elif type(ele) is dict:\n config_list.append(\n Config(load=False,\n cfg_dict=ele,\n logger=self.logger))\n elif type(ele) is list:\n config_list.append(ele)\n else:\n config_list.append(ele)\n return key, config_list\n else:\n if type(elem) is str and elem[1:3] == 'e-':\n elem = float(elem)\n return key, elem\n\n dic = dict(recur(k, v) for k, v in cfg_dict.items())\n self.__dict__.update(dic)\n\n def _load_json(self, cfg_file):\n '''\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n cfg = json.load(open(file_name, 'r'))\n except Exception as e:\n self.logger.error(f'Load json from {cfg_file} error. Message: {e}')\n sys.exit()\n return cfg\n\n def _load_yaml(self, cfg_file):\n '''\n if replace some parameters from Base, You can reference the base parameters use Base.\n\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)\n except Exception as e:\n self.logger.error(f'Load yaml from {cfg_file} error. Message: {e}')\n sys.exit()\n if '_BASE_RUN' not in cfg.keys() and '_BASE_MODEL' not in cfg.keys(\n ) and '_BASE' not in cfg.keys():\n return cfg\n\n if '_BASE' in cfg.keys():\n if cfg['_BASE'][1] == '.':\n prev_count = cfg['_BASE'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - cfg['_BASE'].count('..'))] +\n cfg['_BASE'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n else:\n if '_BASE_RUN' in cfg.keys():\n if cfg['_BASE_RUN'][1] == '.':\n prev_count = cfg['_BASE_RUN'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - prev_count)] +\n cfg['_BASE_RUN'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_RUN'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base,\n cfg,\n preserve_base=True)\n if '_BASE_MODEL' in cfg.keys():\n if cfg['_BASE_MODEL'][1] == '.':\n prev_count = cfg['_BASE_MODEL'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(\n -1 - cfg['_BASE_MODEL'].count('..'))] +\n cfg['_BASE_MODEL'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_MODEL'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n return cfg\n\n def _path_join(self, path_list):\n path = ''\n for p in path_list:\n path += p + '/'\n return path[:-1]\n\n def items(self):\n return self.cfg_dict.items()\n\n def _merge_cfg_from_base(self, cfg_base, cfg, preserve_base=False):\n for k, v in cfg.items():\n if k in cfg_base.keys():\n if isinstance(v, dict):\n 
self._merge_cfg_from_base(cfg_base[k], v)\n else:\n cfg_base[k] = v\n else:\n if 'BASE' not in k or preserve_base:\n cfg_base[k] = v\n return cfg_base\n\n def _merge_cfg_from_command(self, args, cfg):\n assert len(\n args.opts\n ) % 2 == 0, f'Override list {args.opts} has odd length: {len(args.opts)}'\n\n keys = args.opts[0::2]\n vals = args.opts[1::2]\n\n # maximum supported depth 3\n for idx, key in enumerate(keys):\n key_split = key.split('.')\n assert len(\n key_split\n ) <= 4, 'Key depth error. \\n Maximum depth: 3\\n Get depth: {}'.format(\n len(key_split))\n assert key_split[0] in cfg.keys(), 'Non-existant key: {}.'.format(\n key_split[0])\n if len(key_split) == 2:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 3:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 4:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[3] in cfg[key_split[0]][key_split[1]][\n key_split[2]].keys(), 'Non-existant key: {}'.format(key)\n\n if len(key_split) == 1:\n cfg[key_split[0]] = vals[idx]\n elif len(key_split) == 2:\n cfg[key_split[0]][key_split[1]] = vals[idx]\n elif len(key_split) == 3:\n cfg[key_split[0]][key_split[1]][key_split[2]] = vals[idx]\n elif len(key_split) == 4:\n cfg[key_split[0]][key_split[1]][key_split[2]][\n key_split[3]] = vals[idx]\n\n return cfg\n\n def __repr__(self):\n return '{}\\n'.format(self.dump())\n\n def dump(self):\n return json.dumps(self.cfg_dict, indent=2)\n\n def deep_copy(self):\n return copy.deepcopy(self)\n\n def have(self, name):\n if name in self.__dict__:\n return True\n return False\n\n def get(self, name, default=None):\n if name in self.__dict__:\n return self.__dict__[name]\n return default\n\n def __getitem__(self, key):\n return self.__dict__.__getitem__(key)\n\n def __setattr__(self, key, value):\n super().__setattr__(key, value)\n if hasattr(self, 'cfg_dict') and key in self.cfg_dict:\n if isinstance(value, Config):\n value = value.cfg_dict\n self.cfg_dict[key] = value\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n self.__setattr__(key, value)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def set(self, name, value):\n new_dict = {name: value}\n self.__dict__.update(new_dict)\n self.__setattr__(name, value)\n\n def get_dict(self):\n return self.cfg_dict\n\n def get_lowercase_dict(self, cfg_dict=None):\n if cfg_dict is None:\n cfg_dict = self.get_dict()\n config_new = {}\n for key, val in cfg_dict.items():\n if isinstance(key, str):\n if isinstance(val, dict):\n config_new[key.lower()] = self.get_lowercase_dict(val)\n else:\n config_new[key.lower()] = val\n else:\n config_new[key] = val\n return config_new\n\n @staticmethod\n def get_plain_cfg(cfg=None):\n if isinstance(cfg, Config):\n cfg_new = {}\n cfg_dict = cfg.get_dict()\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, (str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, dict):\n cfg_new = {}\n cfg_dict = cfg\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, 
(str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, list):\n cfg_new = []\n cfg_list = cfg\n for val in cfg_list:\n if isinstance(val, (Config, dict, list)):\n cfg_new.append(Config.get_plain_cfg(val))\n elif isinstance(val, (str, numbers.Number)):\n cfg_new.append(val)\n return cfg_new\n else:\n return cfg" }, { "identifier": "we", "path": "scepter/modules/utils/distribute.py", "snippet": " def set_random_seed(seed):\ndef get_dist_info():\ndef gather_data(data):\ndef gather_list(data):\ndef gather_picklable(data):\ndef _gather_picklable_custom(data):\ndef gather_gpu_tensors(tensor, all_recv=False, is_cat=True):\ndef broadcast(tensor, src, group=None, **kwargs):\ndef barrier():\ndef get_global_gloo_group():\ndef reduce_scatter(output,\n input_list,\n op=dist.ReduceOp.SUM,\n group=None,\n **kwargs):\ndef all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef _serialize_to_tensor(data):\ndef _unserialize_from_tensor(recv_data):\ndef send(tensor, dst, group=None, **kwargs):\ndef recv(tensor, src=None, group=None, **kwargs):\ndef isend(tensor, dst, group=None, **kwargs):\ndef irecv(tensor, src=None, group=None, **kwargs):\ndef scatter(data, scatter_list=None, src=0, group=None, **kwargs):\ndef shared_random_seed():\ndef mp_worker(gpu, ngpus_per_node, cfg, fn, pmi_rank, world_size, work_env):\n def __init__(self):\n def init_env(self, config, fn, logger=None):\n def get_env(self):\n def set_env(self, we_env):\n def __str__(self):\nclass Workenv(object):" }, { "identifier": "FS", "path": "scepter/modules/utils/file_system.py", "snippet": "FS = FileSystem()" } ]
import copy import hashlib import json import os.path import random import torch import torch.nn as nn import torch.nn.functional as F import torchvision.transforms as TT from collections import OrderedDict from peft.utils import CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME from PIL.Image import Image from swift import Swift, SwiftModel from scepter.modules.model.network.diffusion.diffusion import GaussianDiffusion from scepter.modules.model.network.diffusion.schedules import noise_schedule from scepter.modules.model.registry import (BACKBONES, EMBEDDERS, MODELS, TOKENIZERS, TUNERS) from scepter.modules.utils.config import Config from scepter.modules.utils.distribute import we from scepter.modules.utils.file_system import FS from safetensors.torch import \ load_file as safe_load_file from safetensors.torch import \ save_file as safe_save_file from safetensors.torch import load_file as load_safetensors from safetensors.torch import load_file as load_safetensors
12,587
cond_stage_model = OrderedDict() diffusion_model = OrderedDict() for k, v in sd.items(): if k.startswith('first_stage_model.'): first_stage_model[k.replace( 'first_stage_model.', '')] = v elif k.startswith('conditioner.'): cond_stage_model[k.replace('conditioner.', '')] = v elif k.startswith('cond_stage_model.'): if k.startswith('cond_stage_model.model.'): cond_stage_model[k.replace( 'cond_stage_model.model.', '')] = v else: cond_stage_model[k.replace( 'cond_stage_model.', '')] = v elif k.startswith('model.diffusion_model.'): diffusion_model[k.replace('model.diffusion_model.', '')] = v else: continue if cfg.have('FIRST_STAGE_MODEL'): with open(first_stage_model_path + 'cache', 'wb') as f: torch.save(first_stage_model, f) os.rename(first_stage_model_path + 'cache', first_stage_model_path) self.logger.info( 'First stage model has been processed.') if cfg.have('COND_STAGE_MODEL'): with open(cond_stage_model_path + 'cache', 'wb') as f: torch.save(cond_stage_model, f) os.rename(cond_stage_model_path + 'cache', cond_stage_model_path) self.logger.info( 'Cond stage model has been processed.') if cfg.have('DIFFUSION_MODEL'): with open(diffusion_model_path + 'cache', 'wb') as f: torch.save(diffusion_model, f) os.rename(diffusion_model_path + 'cache', diffusion_model_path) self.logger.info('Diffusion model has been processed.') if not cfg.FIRST_STAGE_MODEL.get('PRETRAINED_MODEL', None): cfg.FIRST_STAGE_MODEL.PRETRAINED_MODEL = first_stage_model_path else: cfg.FIRST_STAGE_MODEL.RELOAD_MODEL = first_stage_model_path if not cfg.COND_STAGE_MODEL.get('PRETRAINED_MODEL', None): cfg.COND_STAGE_MODEL.PRETRAINED_MODEL = cond_stage_model_path else: cfg.COND_STAGE_MODEL.RELOAD_MODEL = cond_stage_model_path if not cfg.DIFFUSION_MODEL.get('PRETRAINED_MODEL', None): cfg.DIFFUSION_MODEL.PRETRAINED_MODEL = diffusion_model_path else: cfg.DIFFUSION_MODEL.RELOAD_MODEL = diffusion_model_path return cfg def init_from_modules(self, modules): for k, v in modules.items(): self.__setattr__(k, v) def infer_model(self, cfg, module_paras=None): module = { 'model': None, 'cfg': cfg, 'device': 'offline', 'name': cfg.NAME, 'function_info': {}, 'paras': {} } if module_paras is None: return module function_info = {} paras = { k.lower(): v for k, v in module_paras.get('PARAS', {}).items() } for function in module_paras.get('FUNCTION', []): input_dict = {} for inp in function.get('INPUT', []): if inp.lower() in self.input: input_dict[inp.lower()] = self.input[inp.lower()] function_info[function.NAME] = { 'dtype': function.get('DTYPE', 'float32'), 'input': input_dict } module['paras'] = paras module['function_info'] = function_info return module def init_from_ckpt(self, path, model, ignore_keys=list()): if path.endswith('safetensors'): sd = load_safetensors(path) else: sd = torch.load(path, map_location='cpu') new_sd = OrderedDict() for k, v in sd.items(): ignored = False for ik in ignore_keys: if ik in k: if we.rank == 0: self.logger.info( 'Ignore key {} from state_dict.'.format(k)) ignored = True break if not ignored: new_sd[k] = v missing, unexpected = model.load_state_dict(new_sd, strict=False) if we.rank == 0: self.logger.info( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: self.logger.info(f'Missing Keys:\n {missing}') if len(unexpected) > 0: self.logger.info(f'\nUnexpected Keys:\n {unexpected}') def load(self, module): if module['device'] == 'offline': if module['cfg'].NAME in MODELS.class_map: model = MODELS.build(module['cfg'], logger=self.logger).eval()
# -*- coding: utf-8 -*- def get_model(model_tuple): assert 'model' in model_tuple return model_tuple['model'] class DiffusionInference(): ''' define vae, unet, text-encoder, tuner, refiner components support to load the components dynamicly. create and load model when run this model at the first time. ''' def __init__(self, logger=None): self.logger = logger def init_from_cfg(self, cfg): self.name = cfg.NAME self.is_default = cfg.get('IS_DEFAULT', False) module_paras = self.load_default(cfg.get('DEFAULT_PARAS', None)) assert cfg.have('MODEL') cfg.MODEL = self.redefine_paras(cfg.MODEL) self.diffusion = self.load_schedule(cfg.MODEL.SCHEDULE) self.diffusion_model = self.infer_model( cfg.MODEL.DIFFUSION_MODEL, module_paras.get( 'DIFFUSION_MODEL', None)) if cfg.MODEL.have('DIFFUSION_MODEL') else None self.first_stage_model = self.infer_model( cfg.MODEL.FIRST_STAGE_MODEL, module_paras.get( 'FIRST_STAGE_MODEL', None)) if cfg.MODEL.have('FIRST_STAGE_MODEL') else None self.cond_stage_model = self.infer_model( cfg.MODEL.COND_STAGE_MODEL, module_paras.get( 'COND_STAGE_MODEL', None)) if cfg.MODEL.have('COND_STAGE_MODEL') else None self.refiner_cond_model = self.infer_model( cfg.MODEL.REFINER_COND_MODEL, module_paras.get( 'REFINER_COND_MODEL', None)) if cfg.MODEL.have('REFINER_COND_MODEL') else None self.refiner_diffusion_model = self.infer_model( cfg.MODEL.REFINER_MODEL, module_paras.get( 'REFINER_MODEL', None)) if cfg.MODEL.have('REFINER_MODEL') else None self.tokenizer = TOKENIZERS.build( cfg.MODEL.TOKENIZER, logger=self.logger) if cfg.MODEL.have('TOKENIZER') else None if self.tokenizer is not None: self.cond_stage_model['cfg'].KWARGS = { 'vocab_size': self.tokenizer.vocab_size } def register_tuner(self, tuner_model_list): if len(tuner_model_list) < 1: if isinstance(self.diffusion_model['model'], SwiftModel): for adapter_name in self.diffusion_model['model'].adapters: self.diffusion_model['model'].deactivate_adapter( adapter_name, offload='cpu') if isinstance(self.cond_stage_model['model'], SwiftModel): for adapter_name in self.cond_stage_model['model'].adapters: self.cond_stage_model['model'].deactivate_adapter( adapter_name, offload='cpu') return all_diffusion_tuner = {} all_cond_tuner = {} save_root_dir = '.cache_tuner' for tuner_model in tuner_model_list: tunner_model_folder = tuner_model.MODEL_PATH local_tuner_model = FS.get_dir_to_local_dir(tunner_model_folder) all_tuner_datas = os.listdir(local_tuner_model) cur_tuner_md5 = hashlib.md5( tunner_model_folder.encode('utf-8')).hexdigest() local_diffusion_cache = os.path.join( save_root_dir, cur_tuner_md5 + '_' + 'diffusion') local_cond_cache = os.path.join(save_root_dir, cur_tuner_md5 + '_' + 'cond') meta_file = os.path.join(save_root_dir, cur_tuner_md5 + '_meta.json') if not os.path.exists(meta_file): diffusion_tuner = {} cond_tuner = {} for sub in all_tuner_datas: sub_file = os.path.join(local_tuner_model, sub) config_file = os.path.join(sub_file, CONFIG_NAME) safe_file = os.path.join(sub_file, SAFETENSORS_WEIGHTS_NAME) bin_file = os.path.join(sub_file, WEIGHTS_NAME) if os.path.isdir(sub_file) and os.path.isfile(config_file): # diffusion or cond cfg = json.load(open(config_file, 'r')) if 'cond_stage_model.' 
in cfg['target_modules']: cond_cfg = copy.deepcopy(cfg) if 'cond_stage_model.*' in cond_cfg[ 'target_modules']: cond_cfg['target_modules'] = cond_cfg[ 'target_modules'].replace( 'cond_stage_model.*', '.*') else: cond_cfg['target_modules'] = cond_cfg[ 'target_modules'].replace( 'cond_stage_model.', '') if cond_cfg['target_modules'].startswith('*'): cond_cfg['target_modules'] = '.' + cond_cfg[ 'target_modules'] os.makedirs(local_cond_cache + '_' + sub, exist_ok=True) cond_tuner[os.path.basename(local_cond_cache) + '_' + sub] = hashlib.md5( (local_cond_cache + '_' + sub).encode('utf-8')).hexdigest() os.makedirs(local_cond_cache + '_' + sub, exist_ok=True) json.dump( cond_cfg, open( os.path.join(local_cond_cache + '_' + sub, CONFIG_NAME), 'w')) if 'model.' in cfg['target_modules'].replace( 'cond_stage_model.', ''): diffusion_cfg = copy.deepcopy(cfg) if 'model.*' in diffusion_cfg['target_modules']: diffusion_cfg[ 'target_modules'] = diffusion_cfg[ 'target_modules'].replace( 'model.*', '.*') else: diffusion_cfg[ 'target_modules'] = diffusion_cfg[ 'target_modules'].replace( 'model.', '') if diffusion_cfg['target_modules'].startswith('*'): diffusion_cfg[ 'target_modules'] = '.' + diffusion_cfg[ 'target_modules'] os.makedirs(local_diffusion_cache + '_' + sub, exist_ok=True) diffusion_tuner[ os.path.basename(local_diffusion_cache) + '_' + sub] = hashlib.md5( (local_diffusion_cache + '_' + sub).encode('utf-8')).hexdigest() json.dump( diffusion_cfg, open( os.path.join( local_diffusion_cache + '_' + sub, CONFIG_NAME), 'w')) state_dict = {} is_bin_file = True if os.path.isfile(bin_file): state_dict = torch.load(bin_file) elif os.path.isfile(safe_file): is_bin_file = False state_dict = safe_load_file( safe_file, device='cuda' if torch.cuda.is_available() else 'cpu') save_diffusion_state_dict = {} save_cond_state_dict = {} for key, value in state_dict.items(): if key.startswith('model.'): save_diffusion_state_dict[ key[len('model.'):].replace( sub, os.path.basename(local_diffusion_cache) + '_' + sub)] = value elif key.startswith('cond_stage_model.'): save_cond_state_dict[ key[len('cond_stage_model.'):].replace( sub, os.path.basename(local_cond_cache) + '_' + sub)] = value if is_bin_file: if len(save_diffusion_state_dict) > 0: torch.save( save_diffusion_state_dict, os.path.join( local_diffusion_cache + '_' + sub, WEIGHTS_NAME)) if len(save_cond_state_dict) > 0: torch.save( save_cond_state_dict, os.path.join(local_cond_cache + '_' + sub, WEIGHTS_NAME)) else: if len(save_diffusion_state_dict) > 0: safe_save_file( save_diffusion_state_dict, os.path.join( local_diffusion_cache + '_' + sub, SAFETENSORS_WEIGHTS_NAME), metadata={'format': 'pt'}) if len(save_cond_state_dict) > 0: safe_save_file( save_cond_state_dict, os.path.join(local_cond_cache + '_' + sub, SAFETENSORS_WEIGHTS_NAME), metadata={'format': 'pt'}) json.dump( { 'diffusion_tuner': diffusion_tuner, 'cond_tuner': cond_tuner }, open(meta_file, 'w')) else: meta_conf = json.load(open(meta_file, 'r')) diffusion_tuner = meta_conf['diffusion_tuner'] cond_tuner = meta_conf['cond_tuner'] all_diffusion_tuner.update(diffusion_tuner) all_cond_tuner.update(cond_tuner) if len(all_diffusion_tuner) > 0: self.load(self.diffusion_model) self.diffusion_model['model'] = Swift.from_pretrained( self.diffusion_model['model'], save_root_dir, adapter_name=all_diffusion_tuner) self.diffusion_model['model'].set_active_adapters( list(all_diffusion_tuner.values())) self.unload(self.diffusion_model) if len(all_cond_tuner) > 0: self.load(self.cond_stage_model) 
self.cond_stage_model['model'] = Swift.from_pretrained( self.cond_stage_model['model'], save_root_dir, adapter_name=all_cond_tuner) self.cond_stage_model['model'].set_active_adapters( list(all_cond_tuner.values())) self.unload(self.cond_stage_model) def register_controllers(self, control_model_ins): if control_model_ins is None or control_model_ins == '': if isinstance(self.diffusion_model['model'], SwiftModel): if (hasattr(self.diffusion_model['model'].base_model, 'control_blocks') and self.diffusion_model['model'].base_model.control_blocks ): # noqa del self.diffusion_model['model'].base_model.control_blocks self.diffusion_model[ 'model'].base_model.control_blocks = None self.diffusion_model['model'].base_model.control_name = [] else: del self.diffusion_model['model'].control_blocks self.diffusion_model['model'].control_blocks = None self.diffusion_model['model'].control_name = [] return if not isinstance(control_model_ins, list): control_model_ins = [control_model_ins] control_model = nn.ModuleList([]) control_model_folder = [] for one_control in control_model_ins: one_control_model_folder = one_control.MODEL_PATH control_model_folder.append(one_control_model_folder) have_list = getattr(self.diffusion_model['model'], 'control_name', []) if one_control_model_folder in have_list: ind = have_list.index(one_control_model_folder) csc_tuners = copy.deepcopy( self.diffusion_model['model'].control_blocks[ind]) else: one_local_control_model = FS.get_dir_to_local_dir( one_control_model_folder) control_cfg = Config(cfg_file=os.path.join( one_local_control_model, 'configuration.json')) assert hasattr(control_cfg, 'CONTROL_MODEL') control_cfg.CONTROL_MODEL[ 'INPUT_BLOCK_CHANS'] = self.diffusion_model[ 'model']._input_block_chans control_cfg.CONTROL_MODEL[ 'INPUT_DOWN_FLAG'] = self.diffusion_model[ 'model']._input_down_flag control_cfg.CONTROL_MODEL.PRETRAINED_MODEL = os.path.join( one_local_control_model, 'pytorch_model.bin') csc_tuners = TUNERS.build(control_cfg.CONTROL_MODEL, logger=self.logger) control_model.append(csc_tuners) if isinstance(self.diffusion_model['model'], SwiftModel): del self.diffusion_model['model'].base_model.control_blocks self.diffusion_model[ 'model'].base_model.control_blocks = control_model self.diffusion_model[ 'model'].base_model.control_name = control_model_folder else: del self.diffusion_model['model'].control_blocks self.diffusion_model['model'].control_blocks = control_model self.diffusion_model['model'].control_name = control_model_folder def redefine_paras(self, cfg): if cfg.get('PRETRAINED_MODEL', None): assert FS.isfile(cfg.PRETRAINED_MODEL) with FS.get_from(cfg.PRETRAINED_MODEL, wait_finish=True) as local_path: if local_path.endswith('safetensors'): sd = load_safetensors(local_path) else: sd = torch.load(local_path, map_location='cpu') first_stage_model_path = os.path.join( os.path.dirname(local_path), 'first_stage_model.pth') cond_stage_model_path = os.path.join( os.path.dirname(local_path), 'cond_stage_model.pth') diffusion_model_path = os.path.join( os.path.dirname(local_path), 'diffusion_model.pth') if (not os.path.exists(first_stage_model_path) or not os.path.exists(cond_stage_model_path) or not os.path.exists(diffusion_model_path)): self.logger.info( 'Now read the whole model and rearrange the modules, it may take several mins.' 
) first_stage_model = OrderedDict() cond_stage_model = OrderedDict() diffusion_model = OrderedDict() for k, v in sd.items(): if k.startswith('first_stage_model.'): first_stage_model[k.replace( 'first_stage_model.', '')] = v elif k.startswith('conditioner.'): cond_stage_model[k.replace('conditioner.', '')] = v elif k.startswith('cond_stage_model.'): if k.startswith('cond_stage_model.model.'): cond_stage_model[k.replace( 'cond_stage_model.model.', '')] = v else: cond_stage_model[k.replace( 'cond_stage_model.', '')] = v elif k.startswith('model.diffusion_model.'): diffusion_model[k.replace('model.diffusion_model.', '')] = v else: continue if cfg.have('FIRST_STAGE_MODEL'): with open(first_stage_model_path + 'cache', 'wb') as f: torch.save(first_stage_model, f) os.rename(first_stage_model_path + 'cache', first_stage_model_path) self.logger.info( 'First stage model has been processed.') if cfg.have('COND_STAGE_MODEL'): with open(cond_stage_model_path + 'cache', 'wb') as f: torch.save(cond_stage_model, f) os.rename(cond_stage_model_path + 'cache', cond_stage_model_path) self.logger.info( 'Cond stage model has been processed.') if cfg.have('DIFFUSION_MODEL'): with open(diffusion_model_path + 'cache', 'wb') as f: torch.save(diffusion_model, f) os.rename(diffusion_model_path + 'cache', diffusion_model_path) self.logger.info('Diffusion model has been processed.') if not cfg.FIRST_STAGE_MODEL.get('PRETRAINED_MODEL', None): cfg.FIRST_STAGE_MODEL.PRETRAINED_MODEL = first_stage_model_path else: cfg.FIRST_STAGE_MODEL.RELOAD_MODEL = first_stage_model_path if not cfg.COND_STAGE_MODEL.get('PRETRAINED_MODEL', None): cfg.COND_STAGE_MODEL.PRETRAINED_MODEL = cond_stage_model_path else: cfg.COND_STAGE_MODEL.RELOAD_MODEL = cond_stage_model_path if not cfg.DIFFUSION_MODEL.get('PRETRAINED_MODEL', None): cfg.DIFFUSION_MODEL.PRETRAINED_MODEL = diffusion_model_path else: cfg.DIFFUSION_MODEL.RELOAD_MODEL = diffusion_model_path return cfg def init_from_modules(self, modules): for k, v in modules.items(): self.__setattr__(k, v) def infer_model(self, cfg, module_paras=None): module = { 'model': None, 'cfg': cfg, 'device': 'offline', 'name': cfg.NAME, 'function_info': {}, 'paras': {} } if module_paras is None: return module function_info = {} paras = { k.lower(): v for k, v in module_paras.get('PARAS', {}).items() } for function in module_paras.get('FUNCTION', []): input_dict = {} for inp in function.get('INPUT', []): if inp.lower() in self.input: input_dict[inp.lower()] = self.input[inp.lower()] function_info[function.NAME] = { 'dtype': function.get('DTYPE', 'float32'), 'input': input_dict } module['paras'] = paras module['function_info'] = function_info return module def init_from_ckpt(self, path, model, ignore_keys=list()): if path.endswith('safetensors'): sd = load_safetensors(path) else: sd = torch.load(path, map_location='cpu') new_sd = OrderedDict() for k, v in sd.items(): ignored = False for ik in ignore_keys: if ik in k: if we.rank == 0: self.logger.info( 'Ignore key {} from state_dict.'.format(k)) ignored = True break if not ignored: new_sd[k] = v missing, unexpected = model.load_state_dict(new_sd, strict=False) if we.rank == 0: self.logger.info( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: self.logger.info(f'Missing Keys:\n {missing}') if len(unexpected) > 0: self.logger.info(f'\nUnexpected Keys:\n {unexpected}') def load(self, module): if module['device'] == 'offline': if module['cfg'].NAME in MODELS.class_map: model = 
MODELS.build(module['cfg'], logger=self.logger).eval()
elif module['cfg'].NAME in BACKBONES.class_map:
2
2023-12-21 02:01:48+00:00
16k
RomGai/BrainVis
dc_ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "dc_ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "dc_ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "dc_ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "dc_ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "dc_ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "dc_ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "dc_ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "dc_ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "dc_ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "dc_ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "dc_ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "dc_ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n self.trainable = False\n \n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not 
only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "dc_ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n 
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * 
self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "PLMSSampler", "path": "dc_ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "get_similarity_metric", "path": "eval_metrics.py", "snippet": "def get_similarity_metric(img1, img2, method='pair-wise', metric_name='mse', **kwargs):\n # img1: n, w, h, 3\n # img2: n, w, h, 3\n # all in pixel values: 0 ~ 255\n # return: list of scores 0 ~ 1.\n if img1.shape[-1] != 3:\n img1 = rearrange(img1, 'n c w h -> n w h c')\n if img2.shape[-1] != 3:\n img2 = rearrange(img2, 'n c w h -> n w h c')\n\n if method == 'pair-wise':\n eval_procedure_func = pair_wise_score \n elif method == 'n-way':\n eval_procedure_func = n_way_scores\n elif method == 'metrics-only':\n eval_procedure_func = metrics_only\n elif method == 'class':\n return get_n_way_top_k_acc(img1, img2, **kwargs)\n else:\n raise NotImplementedError\n\n if metric_name == 'mse':\n metric_func = mse_metric\n decision_func = smaller_the_better\n elif metric_name == 'pcc':\n metric_func = pcc_metric\n decision_func = larger_the_better\n elif metric_name == 'ssim':\n metric_func = ssim_metric\n decision_func = larger_the_better\n elif metric_name == 'psm':\n metric_func = psm_wrapper()\n decision_func = smaller_the_better\n elif metric_name == 'fid':\n metric_func = fid_wrapper()\n decision_func = smaller_the_better\n else:\n raise NotImplementedError\n \n return eval_procedure_func(img1, img2, metric_func, decision_func, **kwargs)" }, { "identifier": "FrozenImageEmbedder", "path": "dc_ldm/modules/encoders/modules.py", "snippet": "class FrozenImageEmbedder(AbstractEncoder):\n \"\"\"Uses the CLIP transformer encoder for text (from Hugging Face)\"\"\"\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cuda\", max_length=77):\n super().__init__()\n # self.processor = AutoProcessor.from_pretrained(version)\n self.transformer = CLIPVisionModelWithProjection.from_pretrained(version)\n self.device = device\n self.max_length = max_length\n self.freeze()\n\n\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, inputs):\n # image = Image.open(requests.get(url, stream=True).raw)\n # inputs = self.processor(images=image, return_tensors=\"pt\")\n outputs = self.transformer(**inputs)\n image_embeds = outputs.image_embeds\n return image_embeds\n # z = outputs.last_hidden_state\n\n # return z\n\n def encode(self, inputs):\n return self(inputs)" } ]
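The LitEma snippet above keeps shadow copies of every trainable parameter and warms its decay up as min(decay, (1 + n) / (10 + n)), so the average tracks the model quickly early in training and only later settles at the nominal decay. A stripped-down sketch of that update rule, using a plain dict instead of registered buffers (a simplification, not the repository's API):

import torch
import torch.nn as nn

class SimpleEma:
    """Minimal EMA tracker mirroring the warm-up rule in LitEma (illustrative only)."""
    def __init__(self, model: nn.Module, decay: float = 0.9999):
        self.decay = decay
        self.num_updates = 0
        self.shadow = {k: p.detach().clone() for k, p in model.named_parameters() if p.requires_grad}

    @torch.no_grad()
    def update(self, model: nn.Module):
        self.num_updates += 1
        # Early on the effective decay is much smaller than self.decay.
        decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
        for k, p in model.named_parameters():
            if k in self.shadow:
                self.shadow[k].sub_((1.0 - decay) * (self.shadow[k] - p))

model = nn.Linear(4, 4)
ema = SimpleEma(model)
ema.update(model)  # call once per optimizer step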
import os
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from dc_ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from dc_ldm.modules.ema import LitEma
from dc_ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from dc_ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from dc_ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from dc_ldm.models.diffusion.ddim import DDIMSampler
from dc_ldm.models.diffusion.plms import PLMSSampler
from PIL import Image
from eval_metrics import get_similarity_metric
from dc_ldm.modules.encoders.modules import FrozenImageEmbedder
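Among these imports, make_beta_schedule and extract_into_tensor are the pieces behind the closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps that q_sample implements further down. A hedged, standalone sketch of that noising step (buffer registration and the DDPM class machinery are deliberately omitted):

import torch

def forward_diffuse(x0: torch.Tensor, t: torch.Tensor, sqrt_ab: torch.Tensor, sqrt_1m_ab: torch.Tensor):
    """Sample x_t ~ q(x_t | x_0) in closed form; coefficients broadcast like extract_into_tensor."""
    b = t.shape[0]
    # Gather per-sample coefficients and reshape to (b, 1, 1, 1) so they broadcast over C, H, W.
    c1 = sqrt_ab.gather(-1, t).reshape(b, 1, 1, 1)
    c2 = sqrt_1m_ab.gather(-1, t).reshape(b, 1, 1, 1)
    noise = torch.randn_like(x0)
    return c1 * x0 + c2 * noise, noise

# toy usage with a linear schedule
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, 1000) ** 2
ab = torch.cumprod(1 - betas, dim=0)
x0 = torch.randn(2, 3, 8, 8)
t = torch.randint(0, 1000, (2,))
xt, eps = forward_diffuse(x0, t, ab.sqrt(), (1 - ab).sqrt())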
14,233
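The cropped code that follows walks the conditional reverse process: at every timestep it optionally re-noises the conditioning when the shortened cond schedule is active, calls p_sample to denoise, and pastes back the known region when an inpainting mask is supplied. For reference, a compact sketch of the underlying unconditional ancestral update x_{t-1} = mu_theta(x_t, t) + sigma_t * z in the eps-parameterization, with a dummy noise predictor standing in for the U-Net (an assumption made only to keep the example runnable):

import torch

@torch.no_grad()
def ancestral_step(xt, t, eps_model, betas, alphas_cumprod):
    """One reverse DDPM step in eps-parameterization (illustrative; no x0 clipping)."""
    beta_t = betas[t]
    alpha_t = 1.0 - beta_t
    ab_t = alphas_cumprod[t]
    eps = eps_model(xt, t)
    # Posterior mean of q(x_{t-1} | x_t, x_0) with x_0 reconstructed from the predicted noise.
    mean = (xt - beta_t / (1.0 - ab_t).sqrt() * eps) / alpha_t.sqrt()
    if t == 0:
        return mean  # no noise is added at the final step
    return mean + beta_t.sqrt() * torch.randn_like(xt)

betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, 1000) ** 2
alphas_cumprod = torch.cumprod(1 - betas, dim=0)
eps_model = lambda x, t: torch.zeros_like(x)  # stand-in for the denoising U-Net
x = torch.randn(1, 3, 8, 8)
for step in reversed(range(1000)):
    x = ancestral_step(x, step, eps_model, betas, alphas_cumprod)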
for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, shape,cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = 
make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with self.ema_scope("Plotting"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ddim_steps=300 ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.validation_count = 0 self.ddim_steps = ddim_steps self.return_cond = False self.output_path = None self.main_config = None self.best_val = 0.0 self.run_full_validation_threshold = 0.0 self.eval_avg = True def re_init_ema(self): if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def 
training_step(self, batch, batch_idx): self.train() self.cond_stage_model.train() ### where exactly is this trained? loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=False, on_epoch=True) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=False, on_epoch=True) return loss @torch.no_grad() def generate(self, data, num_samples, ddim_steps=300, HW=None, limit=None, state=None): # fmri_embedding: n, seq_len, embed_dim all_samples = [] if HW is None: shape = (self.p_channels, self.p_image_size, self.p_image_size) else: num_resolutions = len(self.ch_mult) shape = (self.p_channels, HW[0] // 2**(num_resolutions-1), HW[1] // 2**(num_resolutions-1)) model = self sampler = PLMSSampler(model) # sampler = DDIMSampler(model) model.eval() if torch.cuda.is_available(): state = torch.cuda.get_rng_state() if state is None else state torch.cuda.set_rng_state(state) else: state = torch.get_rng_state() if state is None else state torch.set_rng_state(state) # rng = torch.Generator(device=self.device).manual_seed(2022).set_state(state) # state = torch.cuda.get_rng_state() with model.ema_scope(): for count, item in enumerate(zip(data['eeg'], data['image'])): if limit is not None: if count >= limit: break latent = item[0] # fmri embedding gt_image = rearrange(item[1], 'h w c -> 1 c h w') # h w c print(f"rendering {num_samples} examples in {ddim_steps} steps.") # c = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) c, re_latent = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=c, batch_size=num_samples, shape=shape, verbose=False, generator=None) x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0) gt_image = torch.clamp((gt_image+1.0)/2.0,min=0.0, max=1.0) all_samples.append(torch.cat([gt_image.detach().cpu(), x_samples_ddim.detach().cpu()], dim=0)) # put groundtruth at first # display as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=num_samples+1) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() return grid, (255. * torch.stack(all_samples, 0).cpu().numpy()).astype(np.uint8), state def save_images(self, all_samples, suffix=0): # print('output_path') # print(self.output_path) if self.output_path is not None: os.makedirs(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}'), exist_ok=True) for sp_idx, imgs in enumerate(all_samples): # for copy_idx, img in enumerate(imgs[1:]): for copy_idx, img in enumerate(imgs): img = rearrange(img, 'c h w -> h w c') Image.fromarray(img).save(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}', f'test{sp_idx}-{copy_idx}.png')) def full_validation(self, batch, state=None): print('###### run full validation! 
######\n') grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=5, limit=None, state=state) metric, metric_list = self.get_eval_metric(all_samples) self.save_images(all_samples, suffix='%.4f'%metric[-1]) metric_dict = {f'val/{k}_full':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test_full', images=[grid_imgs]) if metric[-1] > self.best_val: self.best_val = metric[-1] torch.save( { 'model_state_dict': self.state_dict(), 'config': self.main_config, 'state': state }, os.path.join(self.output_path, 'checkpoint_best.pth') ) @torch.no_grad() def validation_step(self, batch, batch_idx): if batch_idx != 0: return if self.validation_count % 5 == 0 and self.trainer.current_epoch != 0: self.full_validation(batch) else: # pass grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=3, limit=5) metric, metric_list = self.get_eval_metric(all_samples, avg=self.eval_avg) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test', images=[grid_imgs]) metric_dict = {f'val/{k}':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) if metric[-1] > self.run_full_validation_threshold: self.full_validation(batch, state=state) self.validation_count += 1 def get_eval_metric(self, samples, avg=True): metric_list = ['mse', 'pcc', 'ssim', 'psm'] res_list = [] gt_images = [img[0] for img in samples] gt_images = rearrange(np.stack(gt_images), 'n c h w -> n h w c') samples_to_run = np.arange(1, len(samples[0])) if avg else [1] for m in metric_list: res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, method='pair-wise', metric_name=m) res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, 'class', None, n_way=50, num_trials=50, top_k=1, device='cuda') res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_list.append(np.max(res_part)) metric_list.append('top-1-class') metric_list.append('top-1-class (max)') return res_list, metric_list def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with 
self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=True, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.train_cond_stage_only = False self.clip_tune = True if self.clip_tune: self.image_embedder = FrozenImageEmbedder() self.cls_tune = False def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() def freeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = False def unfreeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = True def freeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = False def unfreeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = True def freeze_first_stage(self): self.first_stage_model.trainable = False for param in self.first_stage_model.parameters(): param.requires_grad = False def unfreeze_first_stage(self): self.first_stage_model.trainable = True for param in self.first_stage_model.parameters(): param.requires_grad = True def freeze_whole_model(self): self.first_stage_model.trainable = False for param in self.parameters(): param.requires_grad = False def unfreeze_whole_model(self): self.first_stage_model.trainable = True for param in self.parameters(): param.requires_grad = True def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() # self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): # self.cond_stage_model.eval() if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c, re_latent = self.cond_stage_model.encode(c) # c = self.cond_stage_model.encode(c) else: c, re_latent = self.cond_stage_model(c) # c = 
self.cond_stage_model(c) # return c return c, re_latent def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting 
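# --- Illustrative aside (standalone sketch, not part of the original file) ----
# get_fold_unfold above prepares Unfold/Fold operators plus a border-aware
# weighting map so an image can be decoded crop-by-crop and the overlapping
# crops blended back together (decoded = fold(...) / normalization further
# below). The sketch shows the same round trip with uniform weights; the
# helper name `stitch_overlapping_patches` is illustrative, not the repo's.
import torch

def stitch_overlapping_patches(x, kernel_size=(4, 4), stride=(2, 2)):
    """Split x into overlapping crops with Unfold, then blend them back with Fold."""
    fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
    unfold = torch.nn.Unfold(**fold_params)
    fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

    patches = unfold(x)                          # (bs, c*kh*kw, L): one column per crop
    # ...a per-crop model would run here; this sketch passes the crops through unchanged...
    summed = fold(patches)                       # Fold sums overlapping contributions
    coverage = fold(unfold(torch.ones_like(x)))  # how many crops cover each pixel
    return summed / coverage                     # dividing by the coverage recovers x exactly

if __name__ == "__main__":
    x = torch.randn(2, 3, 8, 8)
    print(torch.allclose(stitch_overlapping_patches(x), x, atol=1e-6))  # True
# ------------------------------------------------------------------------------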
@torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) # print('encoder_posterior.shape') # print(encoder_posterior.shape) z = self.get_first_stage_encoding(encoder_posterior).detach() # print('z.shape') # print(z.shape) # print(cond_key) # print(self.cond_stage_key) # print(cond_key) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox','fmri', 'eeg']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x # print('get input') # print(not self.cond_stage_trainable) # print(force_c_encode) if not self.cond_stage_trainable or force_c_encode : # print('get learned condition') if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c, re_latent = self.get_learned_conditioning(xc) # c = self.get_learned_conditioning(xc) else: c, re_latent = self.get_learned_conditioning(xc.to(self.device)) # c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c , batch['label'], batch['image_raw']] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): self.freeze_first_stage() # print('share step\'s get input') x, c, label, image_raw = self.get_input(batch, self.first_stage_key) # print('get input shape') # print('x.shape') # print(x.shape) # print('c.shape') # print(c.shape) if self.return_cond: loss, cc = self(x, c, label, image_raw) return loss, cc else: loss = self(x, c, label, image_raw) return loss def forward(self, x, c, label, image_raw, *args, **kwargs): # print(self.num_timesteps) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() # print('t.shape') # print(t.shape) if self.model.conditioning_key is not None: assert c is not None imgs = c if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) c, re_latent = self.get_learned_conditioning(c) # print('c.shape') # print(c.shape) prefix = 'train' if self.training else 'val' loss, loss_dict = self.p_losses(x, c, t, *args, **kwargs) # pre_cls = self.cond_stage_model.get_cls(re_latent) # rencon = self.cond_stage_model.recon(re_latent) if self.clip_tune: image_embeds = self.image_embedder(image_raw) loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_clip # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_clip': loss_clip}) if self.cls_tune: pre_cls = self.cond_stage_model.get_cls(re_latent) loss_cls = self.cls_loss(label, pre_cls) # image_embeds = self.image_embedder(image_raw) # loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_cls # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_cls': loss_cls}) # if self.return_cond: # return self.p_losses(x, c, t, *args, **kwargs), c # return self.p_losses(x, c, t, *args, **kwargs) if self.return_cond: return loss, loss_dict, c return loss, loss_dict # def recon_loss(self, ) def recon_loss(self, imgs, pred): """ imgs: [N, 1, num_voxels] pred: [N, L, p] mask: [N, L], 0 is keep, 1 is remove, """ # target = self.patchify(imgs) loss = (pred - imgs) ** 2 loss = loss.mean() # loss = loss.mean(dim=-1) # [N, L], mean loss per patch # loss = (loss * mask).sum() / mask.sum() if mask.sum() != 0 else (loss * mask).sum() # mean loss on removed patches return loss def cls_loss(self, label, pred): return torch.nn.CrossEntropyLoss()(pred, label) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: 
move to dataset def rescale_bbox(bbox): x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) # print('x_recon') # if isinstance(x_recon, tuple): # print('is tuple') # # print(len(x_recon)) # # print(x_recon[0].shape) # else: # print(x_recon.shape) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) # print('p_losses') # print('noise.shape') # print(noise.shape) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) # print('x_noisy[0].shape') # print(x_noisy[0].shape) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = 
self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, shape,cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = 
log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with self.ema_scope("Plotting"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
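# --- Illustrative aside (standalone sketch, not part of the original file) ----
# Both the "diffusion_row" visualisation and p_losses above rely on q_sample,
# the closed-form forward-noising step. The sketch assumes the standard DDPM
# parameterisation x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps and a
# simple linear beta schedule between the linear_start/linear_end defaults of
# register_schedule; the repository's own schedule builder may space the betas
# differently, and the name `q_sample_sketch` is illustrative only.
import torch

def q_sample_sketch(x_start, t, betas, noise=None):
    """Sample x_t ~ q(x_t | x_0) in closed form."""
    noise = torch.randn_like(x_start) if noise is None else noise
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    sqrt_ac = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)         # per-sample coefficient
    sqrt_om = (1.0 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
    return sqrt_ac * x_start + sqrt_om * noise

if __name__ == "__main__":
    betas = torch.linspace(1e-4, 2e-2, 1000)    # linear_start / linear_end defaults above
    x0 = torch.randn(4, 3, 32, 32)
    t = torch.randint(0, 1000, (4,))
    print(q_sample_sketch(x0, t, betas).shape)  # torch.Size([4, 3, 32, 32])
# ------------------------------------------------------------------------------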
gold_snippet_index: 12
created_at: 2023-12-16 12:52:14+00:00
level: 16k
repo_name: tonnetonne814/PL-Bert-VITS2
file_path: train_ms.py
[ { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n i=0\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class 
TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n # sid = 1\n max_bert_len = max([x[4].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n bert_lengths = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n bert_padded = torch.FloatTensor(len(batch), 13, max_bert_len, 768)\n\n text_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n bert = row[4]\n bert_padded[i, :, :bert.size(1),:] = bert\n bert_lengths[i] = bert.size(1)\n\n\n if self.return_ids:\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n ids_sorted_decreasing,\n )\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n )" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.hparams = hparams\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 999)\n self.min_audio_len = getattr(hparams, \"min_audio_len\", 8192)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n self.count = 0\n\n def _filter(self):\n \"\"\"\n Filter text & store 
spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n for data in self.audiopaths_sid_text:\n audiopath, sid, ph, text, bert, emo, style = data\n if not os.path.isfile(audiopath):\n continue\n if self.min_text_len <= len(text) and len(text) <= self.max_text_len:\n audiopaths_sid_text_new.append([audiopath, sid, ph, text, bert, emo, style])\n length = os.path.getsize(audiopath) // (2 * self.hop_length)\n if length < self.min_audio_len // self.hop_length:\n print(\"DATA PASS\")\n continue\n lengths.append(length)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n print(f\"INFO:{len(self.audiopaths_sid_text)} is used as Training Dataset.\")\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, ph, text, pl_bert, emo, style = (\n audiopath_sid_text[0],\n audiopath_sid_text[1],\n audiopath_sid_text[2],\n audiopath_sid_text[3],\n audiopath_sid_text[4],\n audiopath_sid_text[5],\n audiopath_sid_text[6],\n )\n ph = self.get_text(ph)\n spec, wav = self.get_audio(audiopath)\n bert = self.get_pl_bert(pl_bert)\n sid = self.get_sid(sid)\n\n # parameter checker \n assert len(ph) == bert.size(1)\n\n return (ph, spec, wav, sid, bert)\n \n def get_pl_bert(self, filename):\n path = os.path.join(\"pl_bert_embeddings\", f\"{filename}.PlBertJa\")\n data = torch.load(path)\n if self.add_blank:\n L, T, H = data.shape\n new_data = torch.zeros(size=(L,2*T+1,H), dtype=data.dtype)\n for idx in range(T):\n target_idx = idx*2+1\n new_data[:, target_idx, :] = data[:, idx, :]\n data = new_data\n return data\n\n def get_audio(self, filename):\n # TODO : if linear spec exists convert to mel from existing linear spec\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate\n )\n )\n # audio_norm = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n \"\"\"TODO : (need verification)\n if linear spec exists convert to\n mel from existing linear spec (uncomment below lines)\"\"\"\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", \".spec.pt\")),\n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text):\n if self.cleaned_text:\n text_norm = cleaned_text_to_sequence(text)\n else:\n text_norm = text_to_sequence(text, self.text_cleaners)\n if 
self.add_blank:\n text_norm = commons.intersperse(text_norm, 0)\n text_norm = torch.LongTensor(text_norm)\n return text_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n if version.parse(torch.__version__) >= version.parse(\"2\"):\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n else:\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES", "path": "models.py", "snippet": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [\n \"dur_disc_1\",\n \"dur_disc_2\",\n]" }, { "identifier": "AVAILABLE_FLOW_TYPES", "path": "models.py", "snippet": "AVAILABLE_FLOW_TYPES = [\n \"pre_conv\",\n \"pre_conv2\",\n \"fft\",\n \"mono_layer_inter_residual\",\n \"mono_layer_post_residual\",\n]" }, { "identifier": "DurationDiscriminatorV1", "path": "models.py", "snippet": "class DurationDiscriminatorV1(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_1(x)\n # x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_2(x)\n # x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_1(x)\n # x = self.drop(x)\n x = self.conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_2(x)\n # x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return 
output_probs" }, { "identifier": "DurationDiscriminatorV2", "path": "models.py", "snippet": "class DurationDiscriminatorV2(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append([output_prob])\n\n return output_probs" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11, 17, 23, 37]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n 
upsample_kernel_sizes,\n bert_emb_size,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n **kwargs,\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", False\n )\n self.use_transformer_flows = kwargs.get(\"use_transformer_flows\", False)\n self.transformer_flow_type = kwargs.get(\n \"transformer_flow_type\", \"mono_layer_post_residual\"\n )\n if self.use_transformer_flows:\n assert (\n self.transformer_flow_type in AVAILABLE_FLOW_TYPES\n ), f\"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}\"\n self.use_sdp = use_sdp\n # self.use_duration_discriminator = kwargs.get(\"use_duration_discriminator\", False)\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n else:\n self.enc_gin_channels = 0\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n bert_emb_size=bert_emb_size,\n gin_channels=self.enc_gin_channels,\n )\n\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n self.flow = ResidualCouplingTransformersBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 4,\n gin_channels=gin_channels,\n use_transformer_flows=self.use_transformer_flows,\n transformer_flow_type=self.transformer_flow_type,\n )\n\n if use_sdp:\n self.dp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n else:\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n\n # 重み付け加算式を取る\n self.WSL = WeightSumLayer(n_layers=13)\n\n def forward(self, x, x_lengths, y, y_lengths, bert, bert_lengths, sid=None):\n bert = self.WSL(bert)\n\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = 
torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n if self.use_sdp:\n l_length = self.dp(x, x_mask, w, g=g)\n l_length = l_length / torch.sum(x_mask)\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n logw_ = torch.log(w + 1e-6) * x_mask\n else:\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n bert,\n bert_lengths,\n sid=None,\n noise_scale=1,\n length_scale=1,\n noise_scale_w=1.0,\n max_len=None,\n ):\n bert = self.WSL(bert)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n if self.use_sdp:\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)\n else:\n logw = self.dp(x, x_mask, g=g)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n # currently vits-2 is not capable of voice conversion\n ## comment - choihkk\n ## Assuming the use of the ResidualCouplingTransformersLayer2 module, it seems that voice conversion is possible \n def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):\n assert self.n_speakers > 0, \"n_speakers have to be larger than 0.\"\n g_src = self.emb_g(sid_src).unsqueeze(-1)\n g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)\n z_p = self.flow(z, y_mask, g=g_src)\n z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)\n o_hat = self.dec(z_hat * y_mask, 
g=g_tgt)\n return o_hat, y_mask, (z, z_p, z_hat)" }, { "identifier": "symbols", "path": "PL_BERT_ja/text/symbols.py", "snippet": "" } ]
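The DistributedBucketSampler snippet quoted in this context groups utterances by spectrogram length so every batch is drawn from a single length bucket, and it silently discards samples whose length falls outside the configured boundaries. The short, self-contained sketch below reproduces the `_bisect` assignment rule on the boundary list that train_ms.py passes in further down; the helper name `assign_bucket` is illustrative, not a name from the repository.

import bisect

def assign_bucket(length, boundaries):
    """Bucket i holds lengths with boundaries[i] < length <= boundaries[i + 1]; -1 means 'discard'."""
    if length <= boundaries[0] or length > boundaries[-1]:
        return -1
    return bisect.bisect_left(boundaries, length) - 1

if __name__ == "__main__":
    boundaries = [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000]  # as used below in train_ms.py
    for spec_len in (20, 64, 480, 2000, 5000):
        print(spec_len, assign_bucket(spec_len, boundaries))
    # prints: 20 -1 / 64 0 / 480 1 / 2000 7 / 5000 -1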
import argparse
import itertools
import json
import math
import os
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
import commons
import models
import utils
from torch import nn, optim
from torch.cuda.amp import GradScaler, autocast
from torch.nn import functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate,
                        TextAudioSpeakerLoader)
from losses import discriminator_loss, feature_loss, generator_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES, AVAILABLE_FLOW_TYPES,
                    DurationDiscriminatorV1, DurationDiscriminatorV2,
                    MultiPeriodDiscriminator, SynthesizerTrn)
from PL_BERT_ja.text.symbols import symbols
12,128
y, y_lengths, bert, bert_lengths, speakers, ) in enumerate(loader): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) bert, bert_lengths = bert.cuda(rank, non_blocking=True), bert_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g(x, x_lengths, spec, spec_lengths, bert, bert_lengths, speakers) if ( hps.model.use_mel_posterior_encoder or hps.data.use_mel_posterior_encoder ): mel = spec else: # comment - choihkk # for numerical stable when using fp16 and torch>=2.0.0, # spec.float() could be help in the training stage # https://github.com/jaywalnut310/vits/issues/15 mel = spec_to_mel_torch( spec.float(), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc # Duration Discriminator if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_( net_dur_disc.parameters(), None ) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw_, logw) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
numba_logger = logging.getLogger('numba') numba_logger.setLevel(logging.WARNING) # from tensorboardX import SummaryWriter torch.backends.cudnn.benchmark = True global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "6060" hps = utils.get_hparams() mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): net_dur_disc = None global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group( backend="nccl", init_method="env://", world_size=n_gpus, rank=rank ) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) if ( "use_mel_posterior_encoder" in hps.model.keys() and hps.model.use_mel_posterior_encoder == True ): print("Using mel posterior encoder for VITS2") posterior_channels = 128 # vits2 hps.data.use_mel_posterior_encoder = True else: print("Using lin posterior encoder for VITS1") posterior_channels = hps.data.filter_length // 2 + 1 hps.data.use_mel_posterior_encoder = False train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, ) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) # some of these flags are not being used in the code and directly set in hps json file. # they are kept here for reference and prototyping. 
if ( "use_transformer_flows" in hps.model.keys() and hps.model.use_transformer_flows == True ): use_transformer_flows = True transformer_flow_type = hps.model.transformer_flow_type print(f"Using transformer flows {transformer_flow_type} for VITS2") assert ( transformer_flow_type in AVAILABLE_FLOW_TYPES ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" else: print("Using normal flows for VITS1") use_transformer_flows = False if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True ): print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True ): # print("Using duration discriminator for VITS2") use_duration_discriminator = True # comment - choihkk # add duration discriminator type here # I think it would be a good idea to come up with a method to input this part accurately, like a hydra duration_discriminator_type = getattr( hps.model, "duration_discriminator_type", "dur_disc_1" ) print(f"Using duration_discriminator {duration_discriminator_type} for VITS2") assert ( duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}" # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正 if duration_discriminator_type == "dur_disc_1": net_dur_disc = DurationDiscriminatorV1( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) elif duration_discriminator_type == "dur_disc_2": net_dur_disc = DurationDiscriminatorV2( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) else: print("NOT using any duration discriminator like VITS1") net_dur_disc = None use_duration_discriminator = False net_g = SynthesizerTrn( len(symbols)+1, posterior_channels, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( net_g.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # comment - choihkk # if we comment out unused parameter like DurationDiscriminator's self.pre_out_norm1,2 self.norm_1,2 # and ResidualCouplingTransformersLayer's self.post_transformer # we don't have to set find_unused_parameters=True # but I will not proceed with commenting out for 
compatibility with the latest work for others net_g = DDP(net_g, device_ids=[rank]) net_d = DDP(net_d, device_ids=[rank]) if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[rank]) try: _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g ) _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d ) if net_dur_disc is not None: _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, ) global_step = (epoch_str - 1) * len(train_loader) input = input("Initialize Global Steps and Epochs ??? y/n") if input == "y": epoch_str = 1 global_step = 0 except: epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() if rank == 0: loader = tqdm.tqdm(train_loader, desc="Loading train data") else: loader = train_loader for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, bert, bert_lengths, speakers, ) in enumerate(loader): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) bert, bert_lengths = bert.cuda(rank, non_blocking=True), bert_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g(x, x_lengths, spec, spec_lengths, bert, bert_lengths, speakers) if ( hps.model.use_mel_posterior_encoder or hps.data.use_mel_posterior_encoder ): mel = spec else: # comment - choihkk 
# for numerical stable when using fp16 and torch>=2.0.0, # spec.float() could be help in the training stage # https://github.com/jaywalnut310/vits/issues/15 mel = spec_to_mel_torch( spec.float(), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc # Duration Discriminator if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_( net_dur_disc.parameters(), None ) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw_, logw) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
loss_gen, losses_gen = generator_loss(y_d_hat_g)
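The cropped training code stops right after the feature-matching, mel, duration and KL terms, and the next_line above computes the adversarial generator loss. How these terms are finally combined lies outside the crop; a hedged, self-contained sketch of the usual VITS-style aggregation (an assumption, not the repository's exact line):

def total_generator_loss(loss_gen, loss_fm, loss_mel, loss_dur, loss_kl, loss_dur_gen=None):
    # Unweighted sum of the adversarial, feature-matching, mel, duration and KL terms;
    # the c_mel and c_kl weights are already folded into loss_mel and loss_kl upstream.
    total = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
    if loss_dur_gen is not None:
        # extra adversarial term from the duration discriminator, when it is enabled
        total = total + loss_dur_gen
    return total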
5
2023-12-16 05:34:02+00:00
16k
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py
[ { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n ):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" }, { "identifier": "MultiHeadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/attention_rim.py", "snippet": "class MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model_read, d_model_write, d_model_out, d_k, d_v, grad_sparse, residual=True, dropout=0.1, skip_write=False, flag=False):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Initialize Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print('d model read: ', d_model_read)\n # print('d_model_write: ', d_model_write)\n # print('d_model_out: ', d_model_out)\n # print('n_head: ', n_head)\n # print('d_k: ', d_k)\n # print('d_v: ', d_v)\n # print('num_blocks_read: ', num_blocks_read)\n # print('num_blocks_write: ', num_blocks_write)\n # input()\n\n self.GLN_qs = nn.Linear(d_model_read, n_head * d_k)\n self.GLN_ks = nn.Linear(d_model_write, n_head * d_k)\n self.GLN_vs = nn.Linear(d_model_write, n_head * d_v)\n\n self.residual = residual\n\n #self.w_qs = nn.Linear(d_model_read, n_head * d_k)\n #self.w_ks = nn.Linear(d_model_write, n_head * d_k)\n #self.w_vs = nn.Linear(d_model_write, n_head * d_v)\n\n #nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), flag=flag)\n #self.layer_norm = nn.LayerNorm(d_model)\n\n self.gate_fc = nn.Linear(n_head * d_v, d_model_out)\n\n if not skip_write:\n self.fc = nn.Linear(n_head * d_v, d_model_out)\n else:\n self.fc = lambda a: a\n\n #nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n self.ln = nn.LayerNorm(d_model_out)\n\n def forward(self, q, k, v, mask=None):\n\n #print('attn input shape', q.shape)\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n #print('q shape', q.shape)\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Forward of Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"q: \", q.size())\n # print(\"k: \", k.size())\n # print(\"v: \", v.size())\n # 
input()\n\n q = self.GLN_qs(q).view(sz_b, len_q, n_head, d_k)\n #q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.GLN_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.GLN_vs(v).reshape(sz_b, len_v, n_head, d_v)\n #v = v.view(sz_b, len_v, n_head, d_v)\n\n # print(\"GLN q: \", q.size())\n # print(\"GLN k: \", k.size())\n # print(\"GLN v: \", v.size())\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n # print(\"Permute q: \", q.size())\n # print(\"Permute k: \", k.size())\n # print(\"Permute v: \", v.size())\n\n #mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn, extra_loss = self.attention(q, k, v, mask=None)\n\n # print(\"Output: \", output.size())\n # print(\"Attention: \", attn.size())\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n # print(\"Here Output: \", output.size())\n\n #print('output shape before fc', output.shape)\n\n #TODO: probably shouldn't just apply residual layer in the forward pass.\n\n output_init = output*1.0\n\n output = self.dropout(self.fc(output_init))\n\n gate = torch.sigmoid(self.gate_fc(output_init))\n\n #output = self.layer_norm(gate * output + (1 - gate) * residual)\n #output = gate * output + (1 - gate) * residual\n\n if self.residual:\n output = gate * torch.tanh(output)\n else:\n #output = self.ln(output)\n pass\n\n # print(\"Final Output: \", output.size())\n\n #output\n\n #print('attn', attn[0])\n #print('output input diff', output - residual)\n\n return output, attn, extra_loss" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if 
mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. / math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_volatile.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. 
Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n print(\"Using gate style\", gate_style)\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n self.attn_log = None\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"query\", self.query_proj)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"key\", self.key_proj)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n count_parameters(\"value\", self.value_proj)\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n #self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n #self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n count_parameters(\"attention_mlp\", self.attention_mlp[0])\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n count_parameters(\"layernorm1\", self.attended_memory_layernorm)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n count_parameters(\"layernorm2\", self.attended_memory_layernorm2)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n 
self.input_projector = nn.Linear(self.input_size, self.mem_size)\n count_parameters(\"input_projector\", self.input_projector)\n\n #self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n \n if gate_style in ['unit', 'memory']:\n self.input_gate_projector = RepeatLinear(self.mem_size, self.num_gates, num_steps)\n count_parameters(\"input_gate_projector\", self.input_gate_projector)\n self.memory_gate_projector = GroupLinearLayer(self.mem_size, self.num_gates, self.mem_slots)\n #self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n\n #(self.mem_size, self.num_gates, self.mem_slots)\n count_parameters(\"memory_gate_projector\", self.memory_gate_projector)\n \n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n print(\"relational volatie!!!\") \n #self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n if True:\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. 
take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n else:\n init_state = torch.randn(batch_size, self.mem_slots, self.mem_size)\n return init_state\n def multihead_attention(self, input, memory, use_topk_ = True, store_log = True):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n #if store_log:\n # self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk and use_topk_:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * 
self.mem_size\n\n def print_log(self):\n print(self.attn_log)\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n \n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print('jello')\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n #self.attn_log = gates[0]\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n self.attn_log = torch.zeros(input_gate.shape[1], input_gate.shape[2], 2)\n self.attn_log[:, :, 0] = input_gate[0].cpu()\n\n input_gate = torch.sigmoid(input_gate+self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n #memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. 
Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n self.attn_log[:, :, 1] = input_gate[0].cpu()\n\n\n output = next_memory.reshape(next_memory.shape[0], -1)\n hx = self.multihead_attention(next_memory, inputs_reshape, use_topk_ = False, store_log = False)\n return output, next_memory, hx\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory, hx = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = None #self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory, hx\n else:\n return logits, memory_out, memory, hx" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_regressive.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. 
Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n self.score_log = None\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, ts, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n init_state = init_state.unsqueeze(1)\n init_state = init_state.repeat(1, ts, 1, 1)\n init_state = init_state.reshape(batch_size * ts, self.mem_slots, -1)\n\n return init_state\n\n def multihead_attention(self, input, memory, mask = None):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n mask = mask.unsqueeze(1).unsqueeze(1)\n #print(mask.size())\n #print(scores.size())\n #scores = scores.masked_fill(mask.bool(), float('-inf'))\n scores = Identity().apply(scores)\n\n scores = torch.softmax(scores, dim = -1)\n scores = scores * mask # mask for attending to prev positions only\n self.score_log = scores\n if True:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n topk_mask = torch.zeros(scores.size()).to(scores.device)\n topk_mask.scatter_(3, topk.indices, 1)\n scores = scores * topk_mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, 
k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. 
create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print(inputs.size())\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory, mask = None):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory, mask = mask)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False, mask = None, other_inp = None):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n #print(inputs.size())\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n #print(inputs_reshape.size())\n else:\n # keep (Batch, ...) 
dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory, mask = mask)\n\n #print(next_memory.size())\n #print(inputs_reshape.size())\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(other_inp.unsqueeze(1), memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n # relational memory这里是不是\n def forward(self, inputs, memory):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n B, T, D = inputs.size()\n mask = torch.ones(inputs.size(1), inputs.size(1)).to(inputs.device)\n mask = torch.tril(mask)\n mask = mask.unsqueeze(0)\n mask = mask.repeat(inputs.size(0), 1, 1)\n\n mask = mask.reshape(mask.size(0) * mask.size(1), -1)\n\n inputs_ = inputs.unsqueeze(2)\n inputs_ = inputs_.repeat(1, 1, inputs.size(1), 1)\n inputs_ = inputs_.reshape(B * T, T, -1)\n\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n #if not parallel:\n # for idx_step in range(inputs.shape[1]):\n # logit, memory = self.forward_step(inputs[:, idx_step], memory)\n # logits.append(logit)\n # logits = torch.cat(logits)\n #else:\n logits, memory = self.forward_step(inputs_, memory, treat_input_as_matrix = True, mask = mask, other_inp = inputs.reshape(B * T, -1))\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory\n\n def print_log(self):\n print(self.score_log[25])" } ]
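Both RelationalMemory variants in this context gate the attended memory with LSTM-like input/forget gates ("equation 7" in their comments). A compact, self-contained sketch of that update, with shapes chosen only for illustration:

import torch

def gated_memory_update(attended_memory, prev_memory, input_gate, forget_gate):
    # next_memory = input_gate * tanh(attended_memory) + forget_gate * prev_memory,
    # matching the forward_step logic of the snippets above.
    return input_gate * torch.tanh(attended_memory) + forget_gate * prev_memory

B, S, D = 2, 4, 8  # batch, mem_slots, mem_size (assumed)
prev = torch.randn(B, S, D)
attended = torch.randn(B, S, D)
i_gate = torch.sigmoid(torch.randn(B, S, D))
f_gate = torch.sigmoid(torch.randn(B, S, D))
next_memory = gated_memory_update(attended, prev, i_gate, f_gate)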
import math
import time
import numpy as np
import torch
import torch.nn.functional as F
import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils
from typing import Dict, Optional, Tuple
from torch import Tensor, nn
from torch.nn import Parameter
from .fairseq_dropout import FairseqDropout
from .attention_rim import MultiHeadAttention as MHAMemory
from .quant_noise import quant_noise
from .group_linear_layer import GroupLinearLayer
from .relational_memory_volatile import RelationalMemory
from .relational_memory_regressive import RelationalMemory as RelationalMemoryRegressive
13,834
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#import models.fairseq_util
#from fairseq.incremental_decoding_utils import with_incremental_state
#from .relational_memory_lstm import RelationalMemory
# why did the author not import relmem from these two classes?
#from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer

class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
        nblocks=1,
        top_k_ratio=None,
        use_value_competition=True,
        shared_memory_attention = False,
        use_topk = False,
        topk = 3,
        num_steps = 5,
        mem_slots = 4,
        null_attention = False,
        regressive = False
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#import models.fairseq_util
#from fairseq.incremental_decoding_utils import with_incremental_state
#from .relational_memory_lstm import RelationalMemory
# why did the author not import relmem from these two classes?
#from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer

class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
        nblocks=1,
        top_k_ratio=None,
        use_value_competition=True,
        shared_memory_attention = False,
        use_topk = False,
        topk = 3,
        num_steps = 5,
        mem_slots = 4,
        null_attention = False,
        regressive = False
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
self.dropout_module = FairseqDropout(
0
2023-12-15 13:13:01+00:00
16k
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
14,182
"A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, 
"*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key)
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key)
converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
5
2023-12-19 21:06:32+00:00
16k
exislow/tidal-dl-ng
tidal_dl_ng/gui.py
[ { "identifier": "get_format_template", "path": "tidal_dl_ng/helper/path.py", "snippet": "def get_format_template(\n media: Track | Album | Playlist | UserPlaylist | Video | Mix | MediaType, settings\n) -> str | bool:\n result = False\n\n if isinstance(media, Track) or media == MediaType.TRACK:\n result = settings.data.format_track\n elif isinstance(media, Album) or media == MediaType.ALBUM:\n result = settings.data.format_album\n elif isinstance(media, Playlist | UserPlaylist) or media == MediaType.PLAYLIST:\n result = settings.data.format_playlist\n elif isinstance(media, Mix) or media == MediaType.MIX:\n result = settings.data.format_mix\n elif isinstance(media, Video) or media == MediaType.VIDEO:\n result = settings.data.format_video\n\n return result" }, { "identifier": "Settings", "path": "tidal_dl_ng/config.py", "snippet": "class Settings(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelSettings\n data = None\n\n def __init__(self):\n self.file_path = path_file_settings()\n self.read(self.file_path)" }, { "identifier": "Tidal", "path": "tidal_dl_ng/config.py", "snippet": "class Tidal(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelToken\n session: tidalapi.Session = None\n data: ModelToken = None\n token_from_storage: bool = False\n settings: Settings = None\n\n def __init__(self, settings: Settings = None):\n self.session = tidalapi.Session()\n # self.session.config.client_id = \"km8T1xS355y7dd3H\"\n # self.session.config.client_secret = \"vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=\"\n self.session.video_quality = tidalapi.VideoQuality.high\n self.file_path = path_file_token()\n self.token_from_storage = self.read(self.file_path)\n self.login_token()\n\n if settings:\n self.settings = settings\n self.settings_apply()\n\n def settings_apply(self, settings: Settings = None) -> bool:\n if settings:\n self.settings = settings\n\n self.session.audio_quality = self.settings.data.quality_audio\n\n return True\n\n def login_token(self) -> bool:\n result = False\n\n if self.token_from_storage:\n try:\n result = self.session.load_oauth_session(\n self.data.token_type, self.data.access_token, self.data.refresh_token, self.data.expiry_time\n )\n except HTTPError:\n result = False\n\n return result\n\n def login_oauth_start(self, function=print) -> None:\n self.session.login_oauth_simple(function)\n\n def login_oauth_finish(self) -> bool:\n result = self.session.check_login()\n\n if result:\n self.token_persist()\n\n return result\n\n def token_persist(self) -> None:\n self.set_option(\"token_type\", self.session.token_type)\n self.set_option(\"access_token\", self.session.access_token)\n self.set_option(\"refresh_token\", self.session.refresh_token)\n self.set_option(\"expiry_time\", self.session.expiry_time)\n self.save()\n\n def login(self, fn_print: Callable) -> bool:\n is_token = self.login_token()\n result = False\n\n if is_token:\n fn_print(\"Yep, looks good! You are logged in.\")\n\n result = True\n elif not is_token:\n fn_print(\"You either do not have a token or your token is invalid.\")\n fn_print(\"No worries, we will handle this...\")\n self.login_oauth_start(fn_print)\n\n is_login = self.login_oauth_finish()\n\n if is_login:\n fn_print(\"The login was successful. I have stored your credentials (token).\")\n\n result = True\n else:\n fn_print(\"Something went wrong. Did you login using your browser correctly? 
May try again...\")\n\n return result" }, { "identifier": "QualityVideo", "path": "tidal_dl_ng/constants.py", "snippet": "class QualityVideo(Enum):\n P360: int = 360\n P480: int = 480\n P720: int = 720\n P1080: int = 1080" }, { "identifier": "TidalLists", "path": "tidal_dl_ng/constants.py", "snippet": "class TidalLists(Enum):\n PLAYLISTS = \"Playlists\"\n FAVORITES = \"Favorites\"\n MIXES = \"Mixes\"" }, { "identifier": "Download", "path": "tidal_dl_ng/download.py", "snippet": "class Download:\n settings: Settings = None\n session: Session = None\n skip_existing: SkipExisting = False\n\n def __init__(self, session: Session, skip_existing: SkipExisting = SkipExisting.Disabled):\n self.settings = Settings()\n self.session = session\n self.skip_existing = skip_existing\n\n def _download(\n self,\n fn_logger: Callable,\n media: Track | Video,\n progress: Progress,\n progress_gui: ProgressBars,\n stream_manifest: StreamManifest,\n path_file: str,\n ):\n media_name: str = name_builder_item(media)\n\n # Set the correct progress output channel.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n # Send signal to GUI with media name\n progress_gui.item_name.emit(media_name)\n\n try:\n # Compute total iterations for progress\n urls_count: int = len(stream_manifest.urls)\n\n if urls_count > 1:\n progress_total: int = urls_count\n block_size: int | None = None\n else:\n # Compute progress iterations based on the file size.\n r = requests.get(stream_manifest.urls[0], stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Get file size and compute progress steps\n total_size_in_bytes: int = int(r.headers.get(\"content-length\", 0))\n block_size: int | None = 4096\n progress_total: float = total_size_in_bytes / block_size\n\n # Create progress Task\n p_task: TaskID = progress.add_task(\n f\"[blue]Item '{media_name[:30]}'\",\n total=progress_total,\n visible=progress_stdout,\n )\n\n # Write content to file until progress is finished.\n while not progress.tasks[p_task].finished:\n with open(path_file, \"wb\") as f:\n for url in stream_manifest.urls:\n # Create the request object with stream=True, so the content won't be loaded into memory at once.\n r = requests.get(url, stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Write the content to disk. 
If `chunk_size` is set to `None` the whole file will be written at once.\n for data in r.iter_content(chunk_size=block_size):\n f.write(data)\n # Advance progress bar.\n progress.advance(p_task)\n\n # To send the progress to the GUI, we need to emit the percentage.\n if not progress_stdout:\n progress_gui.item.emit(progress.tasks[p_task].percentage)\n except HTTPError as e:\n # TODO: Handle Exception...\n fn_logger(e)\n\n # Check if file is encrypted.\n needs_decryption = self.is_encrypted(stream_manifest.encryption_type)\n\n if needs_decryption:\n key, nonce = decrypt_security_token(stream_manifest.encryption_key)\n tmp_path_file_decrypted = path_file + \"_decrypted\"\n decrypt_file(path_file, tmp_path_file_decrypted, key, nonce)\n else:\n tmp_path_file_decrypted = path_file\n\n # Write metadata to file.\n if not isinstance(media, Video):\n self.metadata_write(media, tmp_path_file_decrypted)\n\n return tmp_path_file_decrypted\n\n def instantiate_media(\n self,\n session: Session,\n media_type: type[MediaType.TRACK, MediaType.VIDEO, MediaType.ALBUM, MediaType.PLAYLIST, MediaType.MIX],\n id_media: str,\n ) -> Track | Video:\n if media_type == MediaType.TRACK:\n media = Track(session, id_media)\n elif media_type == MediaType.VIDEO:\n media = Video(session, id_media)\n elif media_type == MediaType.ALBUM:\n media = Album(self.session, id_media)\n elif media_type == MediaType.PLAYLIST:\n media = Playlist(self.session, id_media)\n elif media_type == MediaType.MIX:\n media = Mix(self.session, id_media)\n else:\n raise MediaUnknown\n\n return media\n\n def item(\n self,\n path_base: str,\n file_template: str,\n fn_logger: Callable,\n media: Track | Video = None,\n media_id: str = None,\n media_type: MediaType = None,\n video_download: bool = True,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n ) -> (bool, str):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # If video download is not allowed end here\n if not video_download:\n fn_logger.info(\n f\"Video downloads are deactivated (see settings). 
Skipping video: {name_builder_item(media)}\"\n )\n\n return False, \"\"\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n path_file = os.path.abspath(os.path.normpath(os.path.join(path_base, file_name_relative)))\n\n # Populate StreamManifest for further download.\n if isinstance(media, Track):\n stream = media.stream()\n manifest: str = stream.manifest\n mime_type: str = stream.manifest_mime_type\n else:\n manifest: str = media.get_url()\n mime_type: str = StreamManifestMimeType.VIDEO.value\n\n stream_manifest = self.stream_manifest_parse(manifest, mime_type)\n\n # Sanitize final path_file to fit into OS boundaries.\n path_file = path_file_sanitize(path_file + stream_manifest.file_extension, adapt=True)\n\n # Compute if and how downloads need to be skipped.\n if self.skip_existing.value:\n extension_ignore = self.skip_existing == SkipExisting.ExtensionIgnore\n # TODO: Check if extension is already in `path_file` or not.\n download_skip = check_file_exists(path_file, extension_ignore=extension_ignore)\n else:\n download_skip = False\n\n if not download_skip:\n # Create a temp directory and file.\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path_dir:\n tmp_path_file = os.path.join(tmp_path_dir, str(uuid4()) + stream_manifest.file_extension)\n # Download media.\n tmp_path_file = self._download(fn_logger, media, progress, progress_gui, stream_manifest, tmp_path_file)\n\n if isinstance(media, Video) and self.settings.data.video_convert_mp4:\n # TODO: Make optional.\n # Convert `*.ts` file to `*.mp4` using ffmpeg\n tmp_path_file = self._video_convert(tmp_path_file)\n path_file = os.path.splitext(path_file)[0] + \".mp4\"\n\n # Move final file to the configured destination directory.\n os.makedirs(os.path.dirname(path_file), exist_ok=True)\n shutil.move(tmp_path_file, path_file)\n else:\n fn_logger.debug(f\"Download skipped, since file exists: '{path_file}'\")\n\n return not download_skip, path_file\n\n def cover_url(self, sid: str, dimension: CoverDimensions = CoverDimensions.Px320):\n if sid is None:\n return \"\"\n\n return f\"https://resources.tidal.com/images/{sid.replace('-', '/')}/{dimension.value}.jpg\"\n\n def metadata_write(self, track: Track, path_file: str):\n result: bool = False\n release_date: str = (\n track.album.release_date.strftime(\"%Y-%m-%d\") if track.album and track.album.release_date else \"\"\n )\n copy_right: str = track.copyright if hasattr(track, \"copyright\") and track.copyright else \"\"\n isrc: str = track.isrc if hasattr(track, \"isrc\") and track.isrc else \"\"\n lyrics: str = \"\"\n\n if self.settings.data.lyrics_save:\n # Try to retrieve lyrics.\n try:\n lyrics: str = track.lyrics().subtitles if hasattr(track, \"lyrics\") else \"\"\n except HTTPError:\n # TODO: Implement proper logging.\n print(f\"Could not retrieve lyrics for `{name_builder_item(track)}`.\")\n\n # TODO: Check if it is possible to pass \"None\" values.\n m: Metadata = Metadata(\n path_file=path_file,\n lyrics=lyrics,\n copy_right=copy_right,\n title=track.name,\n artists=[artist.name for artist in track.artists],\n album=track.album.name if track.album else \"\",\n tracknumber=track.track_num,\n date=release_date,\n isrc=isrc,\n albumartist=name_builder_item(track),\n totaltrack=track.album.num_tracks if track.album and track.album.num_tracks else 1,\n totaldisc=track.album.num_volumes if track.album and track.album.num_volumes else 1,\n discnumber=track.volume_num if track.volume_num else 1,\n url_cover=(\n 
self.cover_url(track.album.cover, self.settings.data.metadata_cover_dimension) if track.album else \"\"\n ),\n )\n\n m.save()\n\n result = True\n\n return result\n\n def items(\n self,\n path_base: str,\n fn_logger: Logger | WrapperLogger,\n media_id: str = None,\n media_type: MediaType = None,\n file_template: str = None,\n media: Album | Playlist | UserPlaylist | Mix = None,\n video_download: bool = False,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n download_delay: bool = True,\n ):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n\n # TODO: Extend with pagination support: Iterate through `items` and `tracks`until len(returned list) == 0\n # Get the items and name of the list.\n if isinstance(media, Mix):\n items = media.items()\n list_media_name = media.title[:30]\n elif video_download:\n items = media.items(limit=100)\n list_media_name = media.name[:30]\n else:\n items = media.tracks(limit=999)\n list_media_name = media.name[:30]\n\n # Determine where to redirect the progress information.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n\n # Create the list progress task.\n p_task1: TaskID = progress.add_task(\n f\"[green]List '{list_media_name}'\", total=len(items), visible=progress_stdout\n )\n\n # Iterate through list items\n while not progress.finished:\n for media in items:\n # TODO: Handle return value of `track` method.\n # Download the item.\n status_download, result_path_file = self.item(\n path_base=path_base,\n file_template=file_name_relative,\n media=media,\n progress_gui=progress_gui,\n progress=progress,\n fn_logger=fn_logger,\n )\n\n # Advance progress bar.\n progress.advance(p_task1)\n\n if not progress_stdout:\n progress_gui.list_item.emit(progress.tasks[p_task1].percentage)\n\n # If a file was downloaded and the download delay is enabled, wait until the next download.\n if download_delay and status_download:\n time_sleep: float = round(random.SystemRandom().uniform(2, 5), 1)\n\n # TODO: Fix logging. 
Is not displayed in debug window.\n fn_logger.debug(f\"Next download will start in {time_sleep} seconds.\")\n time.sleep(time_sleep)\n\n def is_encrypted(self, encryption_type: str) -> bool:\n result = encryption_type != \"NONE\"\n\n return result\n\n def get_file_extension(self, stream_url: str, stream_codec: str) -> str:\n if \".flac\" in stream_url:\n result: str = \".flac\"\n elif \".mp4\" in stream_url:\n # TODO: Need to investigate, what the correct extension is.\n # if \"ac4\" in stream_codec or \"mha1\" in stream_codec:\n # result = \".mp4\"\n # elif \"flac\" in stream_codec:\n # result = \".flac\"\n # else:\n # result = \".m4a\"\n result: str = \".mp4\"\n elif \".ts\" in stream_url:\n result: str = \".ts\"\n else:\n result: str = \".m4a\"\n\n return result\n\n def _video_convert(self, path_file: str) -> str:\n path_file_out = os.path.splitext(path_file)[0] + \".mp4\"\n result, _ = ffmpeg.input(path_file).output(path_file_out, map=0, c=\"copy\").run()\n\n return path_file_out\n\n def stream_manifest_parse(self, manifest: str, mime_type: str) -> StreamManifest:\n if mime_type == StreamManifestMimeType.MPD.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n mpd = MPEGDASHParser.parse(manifest_parsed)\n codecs: str = mpd.periods[0].adaptation_sets[0].representations[0].codecs\n mime_type: str = mpd.periods[0].adaptation_sets[0].mime_type\n # TODO: Handle encryption key. But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n # .initialization + the very first of .media; See https://developers.broadpeak.io/docs/foundations-dash\n segments_count = 1 + 1\n\n for s in mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0].segment_timelines[0].Ss:\n segments_count += s.r if s.r else 1\n\n # Populate segment urls.\n segment_template = mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0]\n stream_urls: list[str] = []\n\n for index in range(segments_count):\n stream_urls.append(segment_template.media.replace(\"$Number$\", str(index)))\n\n elif mime_type == StreamManifestMimeType.BTS.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n # JSON string to object.\n stream_manifest = json.loads(manifest_parsed)\n # TODO: Handle more than one download URL\n stream_urls: str = stream_manifest[\"urls\"]\n codecs: str = stream_manifest[\"codecs\"]\n mime_type: str = stream_manifest[\"mimeType\"]\n encryption_type: str = stream_manifest[\"encryptionType\"]\n encryption_key: str | None = (\n stream_manifest[\"encryptionKey\"] if self.is_encrypted(encryption_type) else None\n )\n elif mime_type == StreamManifestMimeType.VIDEO.value:\n # Parse M3U8 video playlist\n m3u8_variant: m3u8.M3U8 = m3u8.load(manifest)\n # Find the desired video resolution or the next best one.\n m3u8_playlist, codecs = self._extract_video_stream(m3u8_variant, self.settings.data.quality_video.value)\n # Populate urls.\n stream_urls: list[str] = m3u8_playlist.files\n\n # TODO: Handle encryption key. 
But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n else:\n raise UnknownManifestFormat\n\n file_extension: str = self.get_file_extension(stream_urls[0], codecs)\n\n result: StreamManifest = StreamManifest(\n urls=stream_urls,\n codecs=codecs,\n file_extension=file_extension,\n encryption_type=encryption_type,\n encryption_key=encryption_key,\n mime_type=mime_type,\n )\n\n return result\n\n def _extract_video_stream(self, m3u8_variant: m3u8.M3U8, quality: str) -> (m3u8.M3U8 | bool, str):\n m3u8_playlist: m3u8.M3U8 | bool = False\n resolution_best: int = 0\n mime_type: str = \"\"\n\n if m3u8_variant.is_variant:\n for playlist in m3u8_variant.playlists:\n if resolution_best < playlist.stream_info.resolution[1]:\n resolution_best = playlist.stream_info.resolution[1]\n m3u8_playlist = m3u8.load(playlist.uri)\n mime_type = playlist.stream_info.codecs\n\n if quality == playlist.stream_info.resolution[1]:\n break\n\n return m3u8_playlist, mime_type" }, { "identifier": "XStream", "path": "tidal_dl_ng/logger.py", "snippet": "class XStream(QtCore.QObject):\nclass QtHandler(logging.Handler):\n def flush(self):\n def fileno(self):\n def write(self, msg):\n def stdout():\n def stderr():\n def __init__(self):\n def emit(self, record):" }, { "identifier": "ProgressBars", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ProgressBars:\n item: QtCore.Signal\n item_name: QtCore.Signal\n list_item: QtCore.Signal" }, { "identifier": "ResultSearch", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ResultSearch:\n position: int\n artist: str\n title: str\n album: str\n duration_sec: int\n obj: object" }, { "identifier": "Ui_MainWindow", "path": "tidal_dl_ng/ui/main.py", "snippet": "class Ui_MainWindow:\n def setupUi(self, MainWindow):\n if not MainWindow.objectName():\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(900, 700)\n self.a_options = QAction(MainWindow)\n self.a_options.setObjectName(\"a_options\")\n self.a_options.setEnabled(False)\n self.a_options.setText(\"Options\")\n self.a_options.setIconText(\"Options\")\n # if QT_CONFIG(tooltip)\n self.a_options.setToolTip(\"Options\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.a_options.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.a_options.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n self.w_central = QWidget(MainWindow)\n self.w_central.setObjectName(\"w_central\")\n self.w_central.setEnabled(True)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(100)\n sizePolicy.setVerticalStretch(100)\n sizePolicy.setHeightForWidth(self.w_central.sizePolicy().hasHeightForWidth())\n self.w_central.setSizePolicy(sizePolicy)\n # if QT_CONFIG(tooltip)\n self.w_central.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.w_central.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.w_central.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.horizontalLayout = QHBoxLayout(self.w_central)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.lh_main = QHBoxLayout()\n self.lh_main.setObjectName(\"lh_main\")\n 
self.lh_main.setSizeConstraint(QLayout.SetNoConstraint)\n self.tr_lists_user = QTreeWidget(self.w_central)\n __qtreewidgetitem = QTreeWidgetItem()\n __qtreewidgetitem.setText(1, \"Info\")\n __qtreewidgetitem.setText(0, \"Playlist\")\n self.tr_lists_user.setHeaderItem(__qtreewidgetitem)\n __qtreewidgetitem1 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem1.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem2 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem2.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem3 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem3.setFlags(Qt.ItemIsEnabled)\n self.tr_lists_user.setObjectName(\"tr_lists_user\")\n # if QT_CONFIG(tooltip)\n self.tr_lists_user.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.tr_lists_user.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.tr_lists_user.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.tr_lists_user.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_lists_user.setProperty(\"showDropIndicator\", False)\n self.tr_lists_user.setIndentation(10)\n self.tr_lists_user.setUniformRowHeights(True)\n self.tr_lists_user.setSortingEnabled(True)\n self.tr_lists_user.header().setCascadingSectionResizes(True)\n self.tr_lists_user.header().setHighlightSections(True)\n self.tr_lists_user.header().setProperty(\"showSortIndicator\", True)\n\n self.lh_main.addWidget(self.tr_lists_user)\n\n self.lv_search_result = QVBoxLayout()\n # ifndef Q_OS_MAC\n self.lv_search_result.setSpacing(-1)\n # endif\n self.lv_search_result.setObjectName(\"lv_search_result\")\n self.lh_search = QHBoxLayout()\n self.lh_search.setObjectName(\"lh_search\")\n self.l_search = QLineEdit(self.w_central)\n self.l_search.setObjectName(\"l_search\")\n self.l_search.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.l_search.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_search.setLocale(QLocale(QLocale.English, QLocale.UnitedStates))\n self.l_search.setText(\"\")\n self.l_search.setPlaceholderText(\"Type and press ENTER to search...\")\n self.l_search.setClearButtonEnabled(True)\n\n self.lh_search.addWidget(self.l_search)\n\n self.cb_search_type = QComboBox(self.w_central)\n self.cb_search_type.setObjectName(\"cb_search_type\")\n # if QT_CONFIG(tooltip)\n self.cb_search_type.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_search_type.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_search_type.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_search_type.setCurrentText(\"\")\n 
self.cb_search_type.setPlaceholderText(\"\")\n\n self.lh_search.addWidget(self.cb_search_type)\n\n self.b_search = QPushButton(self.w_central)\n self.b_search.setObjectName(\"b_search\")\n # if QT_CONFIG(statustip)\n self.b_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_search.setText(\"Search\")\n # if QT_CONFIG(shortcut)\n self.b_search.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_search.addWidget(self.b_search)\n\n self.lv_search_result.addLayout(self.lh_search)\n\n self.tr_results = QTreeWidget(self.w_central)\n self.tr_results.setObjectName(\"tr_results\")\n self.tr_results.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_results.setProperty(\"showDropIndicator\", False)\n self.tr_results.setDragDropOverwriteMode(False)\n self.tr_results.setAlternatingRowColors(False)\n self.tr_results.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.tr_results.setIndentation(10)\n self.tr_results.setSortingEnabled(True)\n self.tr_results.header().setProperty(\"showSortIndicator\", True)\n self.tr_results.header().setStretchLastSection(False)\n\n self.lv_search_result.addWidget(self.tr_results)\n\n self.lh_download = QHBoxLayout()\n self.lh_download.setObjectName(\"lh_download\")\n self.l_quality_audio = QLabel(self.w_central)\n self.l_quality_audio.setObjectName(\"l_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.l_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_audio.setText(\"Audio\")\n self.l_quality_audio.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_audio)\n\n self.cb_quality_audio = QComboBox(self.w_central)\n self.cb_quality_audio.setObjectName(\"cb_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_audio.setCurrentText(\"\")\n self.cb_quality_audio.setPlaceholderText(\"\")\n self.cb_quality_audio.setFrame(True)\n\n self.lh_download.addWidget(self.cb_quality_audio)\n\n self.l_quality_video = QLabel(self.w_central)\n self.l_quality_video.setObjectName(\"l_quality_video\")\n # if QT_CONFIG(tooltip)\n self.l_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_video.setStatusTip(\"\")\n # endif // 
QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_video.setText(\"Video\")\n self.l_quality_video.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_video)\n\n self.cb_quality_video = QComboBox(self.w_central)\n self.cb_quality_video.setObjectName(\"cb_quality_video\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_video.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_video.setCurrentText(\"\")\n self.cb_quality_video.setPlaceholderText(\"\")\n\n self.lh_download.addWidget(self.cb_quality_video)\n\n self.b_download = QPushButton(self.w_central)\n self.b_download.setObjectName(\"b_download\")\n # if QT_CONFIG(tooltip)\n self.b_download.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.b_download.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_download.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_download.setText(\"Download\")\n # if QT_CONFIG(shortcut)\n self.b_download.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_download.addWidget(self.b_download)\n\n self.lh_download.setStretch(0, 5)\n self.lh_download.setStretch(2, 5)\n self.lh_download.setStretch(4, 15)\n\n self.lv_search_result.addLayout(self.lh_download)\n\n self.te_debug = QPlainTextEdit(self.w_central)\n self.te_debug.setObjectName(\"te_debug\")\n self.te_debug.setEnabled(True)\n sizePolicy1 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Maximum)\n sizePolicy1.setHorizontalStretch(0)\n sizePolicy1.setVerticalStretch(0)\n sizePolicy1.setHeightForWidth(self.te_debug.sizePolicy().hasHeightForWidth())\n self.te_debug.setSizePolicy(sizePolicy1)\n self.te_debug.setMaximumSize(QSize(16777215, 16777215))\n self.te_debug.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.te_debug.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.te_debug.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.te_debug.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.te_debug.setUndoRedoEnabled(False)\n self.te_debug.setReadOnly(True)\n\n self.lv_search_result.addWidget(self.te_debug)\n\n self.lh_main.addLayout(self.lv_search_result)\n\n self.lh_main.setStretch(0, 40)\n 
self.lh_main.setStretch(1, 60)\n\n self.horizontalLayout.addLayout(self.lh_main)\n\n MainWindow.setCentralWidget(self.w_central)\n self.menubar = QMenuBar(MainWindow)\n self.menubar.setObjectName(\"menubar\")\n self.menubar.setGeometry(QRect(0, 0, 900, 24))\n # if QT_CONFIG(tooltip)\n self.menubar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.menubar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.menubar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.m_file = QMenu(self.menubar)\n self.m_file.setObjectName(\"m_file\")\n # if QT_CONFIG(tooltip)\n self.m_file.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.m_file.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.m_file.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n # if QT_CONFIG(tooltip)\n self.statusbar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.statusbar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.statusbar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.statusbar.setLayoutDirection(Qt.LeftToRight)\n MainWindow.setStatusBar(self.statusbar)\n\n self.menubar.addAction(self.m_file.menuAction())\n self.m_file.addAction(self.a_options)\n\n self.retranslateUi(MainWindow)\n\n QMetaObject.connectSlotsByName(MainWindow)\n\n # setupUi\n\n def retranslateUi(self, MainWindow):\n MainWindow.setWindowTitle(QCoreApplication.translate(\"MainWindow\", \"MainWindow\", None))\n ___qtreewidgetitem = self.tr_lists_user.headerItem()\n ___qtreewidgetitem.setText(2, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n\n __sortingEnabled = self.tr_lists_user.isSortingEnabled()\n self.tr_lists_user.setSortingEnabled(False)\n ___qtreewidgetitem1 = self.tr_lists_user.topLevelItem(0)\n ___qtreewidgetitem1.setText(0, QCoreApplication.translate(\"MainWindow\", \"Playlists\", None))\n ___qtreewidgetitem2 = self.tr_lists_user.topLevelItem(1)\n ___qtreewidgetitem2.setText(0, QCoreApplication.translate(\"MainWindow\", \"Mixes\", None))\n ___qtreewidgetitem3 = self.tr_lists_user.topLevelItem(2)\n ___qtreewidgetitem3.setText(0, QCoreApplication.translate(\"MainWindow\", \"Favorites\", None))\n self.tr_lists_user.setSortingEnabled(__sortingEnabled)\n\n ___qtreewidgetitem4 = self.tr_results.headerItem()\n ___qtreewidgetitem4.setText(5, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n ___qtreewidgetitem4.setText(4, QCoreApplication.translate(\"MainWindow\", \"Duration\", None))\n ___qtreewidgetitem4.setText(3, QCoreApplication.translate(\"MainWindow\", \"Album\", None))\n ___qtreewidgetitem4.setText(2, 
QCoreApplication.translate(\"MainWindow\", \"Title\", None))\n ___qtreewidgetitem4.setText(1, QCoreApplication.translate(\"MainWindow\", \"Artist\", None))\n ___qtreewidgetitem4.setText(0, QCoreApplication.translate(\"MainWindow\", \"#\", None))\n self.te_debug.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", \"Logs...\", None))\n self.m_file.setTitle(QCoreApplication.translate(\"MainWindow\", \"File\", None))\n\n # retranslateUi" }, { "identifier": "QtWaitingSpinner", "path": "tidal_dl_ng/ui/spinner.py", "snippet": "class QtWaitingSpinner(QWidget):\n def __init__(\n self, parent, centerOnParent=True, disableParentWhenSpinning=False, modality=Qt.WindowModality.NonModal\n ):\n super().__init__(parent)\n\n self._centerOnParent = centerOnParent\n self._disableParentWhenSpinning = disableParentWhenSpinning\n\n # WAS IN initialize()\n self._color = QColor(Qt.GlobalColor.black)\n self._roundness = 100.0\n self._minimumTrailOpacity = 3.14159265358979323846\n self._trailFadePercentage = 80.0\n self._revolutionsPerSecond = 1.57079632679489661923\n self._numberOfLines = 20\n self._lineLength = 10\n self._lineWidth = 2\n self._innerRadius = 10\n self._currentCounter = 0\n self._isSpinning = False\n\n self._timer = QTimer(self)\n self._timer.timeout.connect(self.rotate)\n self.updateSize()\n self.updateTimer()\n self.hide()\n # END initialize()\n\n self.setWindowModality(modality)\n self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)\n\n def paintEvent(self, QPaintEvent):\n self.updatePosition()\n painter = QPainter(self)\n painter.fillRect(self.rect(), Qt.GlobalColor.transparent)\n # Can't found in Qt6\n # painter.setRenderHint(QPainter.Antialiasing, True)\n\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n\n painter.setPen(Qt.PenStyle.NoPen)\n for i in range(0, self._numberOfLines):\n painter.save()\n painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)\n rotateAngle = float(360 * i) / float(self._numberOfLines)\n painter.rotate(rotateAngle)\n painter.translate(self._innerRadius, 0)\n distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)\n color = self.currentLineColor(\n distance, self._numberOfLines, self._trailFadePercentage, self._minimumTrailOpacity, self._color\n )\n painter.setBrush(color)\n rect = QRect(0, int(-self._lineWidth / 2), int(self._lineLength), int(self._lineWidth))\n painter.drawRoundedRect(rect, self._roundness, self._roundness, Qt.SizeMode.RelativeSize)\n painter.restore()\n\n def start(self):\n self.updatePosition()\n self._isSpinning = True\n self.show()\n\n if self.parentWidget and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(False)\n\n if not self._timer.isActive():\n self._timer.start()\n self._currentCounter = 0\n\n def stop(self):\n self._isSpinning = False\n self.hide()\n\n if self.parentWidget() and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(True)\n\n if self._timer.isActive():\n self._timer.stop()\n self._currentCounter = 0\n\n def setNumberOfLines(self, lines):\n self._numberOfLines = lines\n self._currentCounter = 0\n self.updateTimer()\n\n def setLineLength(self, length):\n self._lineLength = length\n self.updateSize()\n\n def setLineWidth(self, width):\n self._lineWidth = width\n self.updateSize()\n\n def setInnerRadius(self, radius):\n self._innerRadius = radius\n self.updateSize()\n\n def color(self):\n return self._color\n\n def roundness(self):\n return self._roundness\n\n def 
minimumTrailOpacity(self):\n return self._minimumTrailOpacity\n\n def trailFadePercentage(self):\n return self._trailFadePercentage\n\n def revolutionsPersSecond(self):\n return self._revolutionsPerSecond\n\n def numberOfLines(self):\n return self._numberOfLines\n\n def lineLength(self):\n return self._lineLength\n\n def lineWidth(self):\n return self._lineWidth\n\n def innerRadius(self):\n return self._innerRadius\n\n def isSpinning(self):\n return self._isSpinning\n\n def setRoundness(self, roundness):\n self._roundness = max(0.0, min(100.0, roundness))\n\n def setColor(self, color=Qt.GlobalColor.black):\n self._color = QColor(color)\n\n def setRevolutionsPerSecond(self, revolutionsPerSecond):\n self._revolutionsPerSecond = revolutionsPerSecond\n self.updateTimer()\n\n def setTrailFadePercentage(self, trail):\n self._trailFadePercentage = trail\n\n def setMinimumTrailOpacity(self, minimumTrailOpacity):\n self._minimumTrailOpacity = minimumTrailOpacity\n\n def rotate(self):\n self._currentCounter += 1\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n self.update()\n\n def updateSize(self):\n size = int((self._innerRadius + self._lineLength) * 2)\n self.setFixedSize(size, size)\n\n def updateTimer(self):\n self._timer.setInterval(int(1000 / (self._numberOfLines * self._revolutionsPerSecond)))\n\n def updatePosition(self):\n if self.parentWidget() and self._centerOnParent:\n self.move(\n int(self.parentWidget().width() / 2 - self.width() / 2),\n int(self.parentWidget().height() / 2 - self.height() / 2),\n )\n\n def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):\n distance = primary - current\n if distance < 0:\n distance += totalNrOfLines\n return distance\n\n def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):\n color = QColor(colorinput)\n if countDistance == 0:\n return color\n minAlphaF = minOpacity / 100.0\n distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))\n if countDistance > distanceThreshold:\n color.setAlphaF(minAlphaF)\n else:\n alphaDiff = color.alphaF() - minAlphaF\n gradient = alphaDiff / float(distanceThreshold + 1)\n resultAlpha = color.alphaF() - gradient * countDistance\n # If alpha is out of bounds, clip it.\n resultAlpha = min(1.0, max(0.0, resultAlpha))\n color.setAlphaF(resultAlpha)\n return color" }, { "identifier": "Worker", "path": "tidal_dl_ng/worker.py", "snippet": "class Worker(QtCore.QRunnable):\n \"\"\"\n Worker thread\n\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n\n \"\"\"\n\n def __init__(self, fn, *args, **kwargs):\n super().__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n\n @QtCore.Slot() # QtCore.Slot\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n self.fn(*self.args, **self.kwargs)" } ]
import math
import sys
import qdarktheme
import coloredlogs.converter
from collections.abc import Callable
from tidal_dl_ng.helper.path import get_format_template
from PySide6 import QtCore, QtGui, QtWidgets
from rich.progress import Progress
from tidalapi import Album, Mix, Playlist, Quality, Track, UserPlaylist, Video
from tidalapi.session import SearchTypes
from tidal_dl_ng.config import Settings, Tidal
from tidal_dl_ng.constants import QualityVideo, TidalLists
from tidal_dl_ng.download import Download
from tidal_dl_ng.logger import XStream, logger_gui
from tidal_dl_ng.model.gui_data import ProgressBars, ResultSearch
from tidal_dl_ng.ui.main import Ui_MainWindow
from tidal_dl_ng.ui.spinner import QtWaitingSpinner
from tidal_dl_ng.worker import Worker
11,836
try:
except ImportError as e:
    print(e)
    print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'")
    sys.exit(1)


# TODO: Make more use of Exceptions
# TODO: Add File -> Version
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
try:
except ImportError as e:
    print(e)
    print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'")
    sys.exit(1)


# TODO: Make more use of Exceptions
# TODO: Add File -> Version
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
settings: Settings = None
1
2023-12-19 23:05:47+00:00
16k
zyrant/SPGroup3D
tests/test_data/test_datasets/test_scannet_dataset.py
[ { "identifier": "ScanNetDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetDataset(Custom3DDataset):\n r\"\"\"ScanNet Dataset for Detection Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'Depth' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\"\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=dict(use_camera=False, use_depth=True),\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False,\n **kwargs):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode,\n **kwargs)\n assert 'use_camera' in self.modality and \\\n 'use_depth' in self.modality\n assert self.modality['use_camera'] or self.modality['use_depth']\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data\n preprocessing pipelines. 
It includes the following keys:\n\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - file_name (str): Filename of point clouds.\n - img_prefix (str, optional): Prefix of image files.\n - img_info (dict, optional): Image info.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n sample_idx = info['point_cloud']['lidar_idx']\n pts_filename = osp.join(self.data_root, info['pts_path'])\n input_dict = dict(sample_idx=sample_idx)\n\n if self.modality['use_depth']:\n input_dict['pts_filename'] = pts_filename\n input_dict['file_name'] = pts_filename\n\n if self.modality['use_camera']:\n img_info = []\n for img_path in info['img_paths']:\n img_info.append(\n dict(filename=osp.join(self.data_root, img_path)))\n intrinsic = info['intrinsics']\n axis_align_matrix = self._get_axis_align_matrix(info)\n depth2img = []\n for extrinsic in info['extrinsics']:\n depth2img.append(\n intrinsic @ np.linalg.inv(axis_align_matrix @ extrinsic))\n\n input_dict['img_prefix'] = None\n input_dict['img_info'] = img_info\n input_dict['depth2img'] = depth2img\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():\n return None\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`):\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - pts_instance_mask_path (str): Path of instance masks.\n - pts_semantic_mask_path (str): Path of semantic masks.\n - axis_align_matrix (np.ndarray): Transformation matrix for\n global scene alignment.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n if info['annos']['gt_num'] != 0:\n gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(\n np.float32) # k, 6\n gt_labels_3d = info['annos']['class'].astype(np.int64)\n else:\n gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)\n gt_labels_3d = np.zeros((0, ), dtype=np.int64)\n\n # to target box structure\n gt_bboxes_3d = DepthInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n with_yaw=False,\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n axis_align_matrix = self._get_axis_align_matrix(info)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path,\n axis_align_matrix=axis_align_matrix)\n return anns_results\n\n def prepare_test_data(self, index):\n \"\"\"Prepare data for testing.\n\n We should take axis_align_matrix from self.data_infos since we need\n to align point clouds.\n\n Args:\n index (int): Index for accessing the target data.\n\n Returns:\n dict: Testing data dict of the corresponding index.\n \"\"\"\n input_dict = self.get_data_info(index)\n # take the axis_align_matrix from data_infos\n input_dict['ann_info'] = dict(\n axis_align_matrix=self._get_axis_align_matrix(\n self.data_infos[index]))\n self.pre_pipeline(input_dict)\n example = self.pipeline(input_dict)\n return example\n\n @staticmethod\n 
def _get_axis_align_matrix(info):\n \"\"\"Get axis_align_matrix from info. If not exist, return identity mat.\n\n Args:\n info (dict): one data info term.\n\n Returns:\n np.ndarray: 4x4 transformation matrix.\n \"\"\"\n if 'axis_align_matrix' in info['annos'].keys():\n return info['annos']['axis_align_matrix'].astype(np.float32)\n else:\n warnings.warn(\n 'axis_align_matrix is not found in ScanNet data info, please '\n 'use new pre-process scripts to re-generate ScanNet data')\n return np.eye(4).astype(np.float32)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(type='GlobalAlignment', rotation_axis=2),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points', load_annos=True).numpy()\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']\n gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None\n gt_labels = self.get_ann_info(i)['gt_labels_3d']\n pred_bboxes = result['boxes_3d']\n pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None\n pred_labels = result['labels_3d']\n show_result_v2(points, gt_bboxes, gt_labels,\n pred_bboxes, pred_labels, out_dir, file_name)" }, { "identifier": "ScanNetInstanceSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegDataset(Custom3DSegDataset):\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,\n 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n - pts_semantic_mask_path (str): Path of semantic masks.\n - pts_instance_mask_path (str): Path of instance masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def get_classes_and_palette(self, classes=None, palette=None):\n \"\"\"Get class names of current dataset. 
Palette is simply ignored for\n instance segmentation.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n Defaults to None.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. If None is given, random\n palette will be generated. Defaults to None.\n \"\"\"\n if classes is not None:\n return classes, None\n return self.CLASSES, None\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=40),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._get_pipeline(pipeline)\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n raise NotImplementedError('show is not implemented for now')\n\n return ret_dict" }, { "identifier": "ScanNetSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetSegDataset(Custom3DSegDataset):\n r\"\"\"ScanNet Dataset for Semantic Segmentation Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g.\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (np.ndarray | str, optional): Precomputed index to load\n data. 
For scenes with many points, we may sample it several times.\n Defaults to None.\n \"\"\"\n CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',\n 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',\n 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',\n 'bathtub', 'otherfurniture')\n\n VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n PALETTE = [\n [174, 199, 232],\n [152, 223, 138],\n [31, 119, 180],\n [255, 187, 120],\n [188, 189, 34],\n [140, 86, 75],\n [255, 152, 150],\n [214, 39, 40],\n [197, 176, 213],\n [148, 103, 189],\n [196, 156, 148],\n [23, 190, 207],\n [247, 182, 210],\n [219, 219, 141],\n [255, 127, 14],\n [158, 218, 229],\n [44, 160, 44],\n [112, 128, 144],\n [227, 119, 194],\n [82, 84, 163],\n ]\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None,\n **kwargs):\n\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs,\n **kwargs)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=np.max(self.ALL_CLASS_IDS)),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_sem_mask = self._extract_data(\n i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n pred_sem_mask = result['semantic_mask'].numpy()\n show_seg_result(points, gt_sem_mask,\n pred_sem_mask, out_dir, file_name,\n np.array(self.PALETTE), self.ignore_index, show)\n\n def get_scene_idxs(self, scene_idxs):\n \"\"\"Compute scene_idxs for data sampling.\n\n 
We sample more times for scenes with more points.\n \"\"\"\n # when testing, we load one whole scene every time\n if not self.test_mode and scene_idxs is None:\n raise NotImplementedError(\n 'please provide re-sampled scene indexes for training')\n\n return super().get_scene_idxs(scene_idxs)\n\n def format_results(self, results, txtfile_prefix=None):\n r\"\"\"Format the results to txt file. Refer to `ScanNet documentation\n <http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.\n\n Args:\n outputs (list[dict]): Testing results of the dataset.\n txtfile_prefix (str): The prefix of saved files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (outputs, tmp_dir), outputs is the detection results,\n tmp_dir is the temporal directory created for saving submission\n files when ``submission_prefix`` is not specified.\n \"\"\"\n import mmcv\n\n if txtfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n txtfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n mmcv.mkdir_or_exist(txtfile_prefix)\n\n # need to map network output to original label idx\n pred2label = np.zeros(len(self.VALID_CLASS_IDS)).astype(np.int)\n for original_label, output_idx in self.label_map.items():\n if output_idx != self.ignore_index:\n pred2label[output_idx] = original_label\n\n outputs = []\n for i, result in enumerate(results):\n info = self.data_infos[i]\n sample_idx = info['point_cloud']['lidar_idx']\n pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)\n pred_label = pred2label[pred_sem_mask]\n curr_file = f'{txtfile_prefix}/{sample_idx}.txt'\n np.savetxt(curr_file, pred_label, fmt='%d')\n outputs.append(dict(seg_mask=pred_label))\n\n return outputs, tmp_dir" }, { "identifier": "ScanNetInstanceSegV2Dataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegV2Dataset(ScanNetDataset):\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._build_default_pipeline()\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval_v2(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n self.show(results, out_dir)\n\n return ret_dict\n\n def show(self, results, out_dir, show=True, pipeline=None):\n assert out_dir is not None, 'Expect out_dir, got none.'\n load_pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_instance_mask, gt_sem_mask = self._extract_data(\n i, load_pipeline, ['points', 'pts_instance_mask', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n gt_inst_mask_final = np.zeros_like(gt_instance_mask)\n for cls_idx in self.VALID_CLASS_IDS:\n mask = gt_sem_mask == cls_idx\n gt_inst_mask_final += mask.numpy()\n gt_instance_mask[gt_inst_mask_final == 0] = -1\n\n pred_instance_masks = result['instance_mask']\n pred_instance_scores = result['instance_score']\n\n pred_instance_masks_sort = pred_instance_masks[pred_instance_scores.argsort()]\n pred_instance_masks_label = pred_instance_masks_sort[0].long() - 1\n for i in range(1, pred_instance_masks_sort.shape[0]):\n pred_instance_masks_label[pred_instance_masks_sort[i]] = i\n\n palette = np.random.random((max(max(pred_instance_masks_label) + 2, max(gt_instance_mask) + 2), 3)) * 255\n palette[-1] = 255\n\n show_seg_result(points, gt_instance_mask,\n pred_instance_masks_label, out_dir, file_name,\n palette)" } ]
import copy import numpy as np import pytest import torch import tempfile import tempfile import mmcv import tempfile import tempfile import mmcv import mmcv from mmdet3d.datasets import (ScanNetDataset, ScanNetInstanceSegDataset, ScanNetSegDataset, ScanNetInstanceSegV2Dataset) from mmdet3d.core.bbox.structures import DepthInstance3DBoxes from os import path as osp from mmdet3d.core.bbox import DepthInstance3DBoxes from os import path as osp from os import path as osp
11,219
2.0221e-02, 2.6153e+00, 1.5109e-02, 7.3335e-01, 1.0429e+00, 1.0251e+00, 0.0000e+00 ]])) scores_3d = torch.tensor( [1.2058e-04, 2.3012e-03, 6.2324e-06, 6.6139e-06, 6.7965e-05]) labels_3d = torch.tensor([0, 0, 0, 0, 0]) result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # show function with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') palette = [ [174, 199, 232], [152, 223, 138], [31, 119, 180], [255, 187, 120], [188, 189, 34], [140, 86, 75], [255, 152, 150], [214, 39, 40], [197, 176, 213], [148, 103, 189], [196, 156, 148], [23, 190, 207], [247, 182, 210], [219, 219, 141], [255, 127, 14], [158, 218, 229], [44, 160, 44], [112, 128, 144], [227, 119, 194], [82, 84, 163], ] scene_idxs = [0 for _ in range(20)] # test network inputs are (xyz, rgb, normalized_xyz) pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=True, enlarge_size=0.2, min_unique_num=None), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask'], meta_keys=['file_name', 'sample_idx']) ]
# Copyright (c) OpenMMLab. All rights reserved. def test_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=True, load_dim=6, use_dim=[0, 1, 2]), dict( type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_mask_3d=True, with_seg_3d=True), dict(type='GlobalAlignment', rotation_axis=2), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)), dict(type='PointSample', num_points=5), dict( type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=1.0, flip_ratio_bev_vertical=1.0), dict( type='GlobalRotScaleTrans', rot_range=[-0.087266, 0.087266], scale_ratio_range=[1.0, 1.0], shift_height=True), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=[ 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', 'pts_instance_mask' ], meta_keys=['file_name', 'sample_idx', 'pcd_rotation']), ] scannet_dataset = ScanNetDataset(root_path, ann_file, pipelines) data = scannet_dataset[0] points = data['points']._data gt_bboxes_3d = data['gt_bboxes_3d']._data gt_labels = data['gt_labels_3d']._data pts_semantic_mask = data['pts_semantic_mask']._data pts_instance_mask = data['pts_instance_mask']._data file_name = data['img_metas']._data['file_name'] pcd_rotation = data['img_metas']._data['pcd_rotation'] sample_idx = data['img_metas']._data['sample_idx'] expected_rotation = np.array([[0.99654, 0.08311407, 0.], [-0.08311407, 0.99654, 0.], [0., 0., 1.]]) assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert np.allclose(pcd_rotation, expected_rotation, 1e-3) assert sample_idx == 'scene0000_00' expected_points = torch.tensor( [[1.8339e+00, 2.1093e+00, 2.2900e+00, 2.3895e+00], [3.6079e+00, 1.4592e-01, 2.0687e+00, 2.1682e+00], [4.1886e+00, 5.0614e+00, -1.0841e-01, -8.8736e-03], [6.8790e+00, 1.5086e+00, -9.3154e-02, 6.3816e-03], [4.8253e+00, 2.6668e-01, 1.4917e+00, 1.5912e+00]]) expected_gt_bboxes_3d = torch.tensor( [[-1.1835, -3.6317, 1.5704, 1.7577, 0.3761, 0.5724, 0.0000], [-3.1832, 3.2269, 1.1911, 0.6727, 0.2251, 0.6715, 0.0000], [-0.9598, -2.2864, 0.0093, 0.7506, 2.5709, 1.2145, 0.0000], [-2.6988, -2.7354, 0.8288, 0.7680, 1.8877, 0.2870, 0.0000], [3.2989, 0.2885, -0.0090, 0.7600, 3.8814, 2.1603, 0.0000]]) expected_gt_labels = np.array([ 6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0, 0, 0, 0, 5, 5, 5 ]) expected_pts_semantic_mask = np.array([0, 18, 18, 18, 18]) expected_pts_instance_mask = np.array([44, 22, 10, 10, 57]) original_classes = scannet_dataset.CLASSES assert scannet_dataset.CLASSES == class_names assert torch.allclose(points, expected_points, 1e-2) assert gt_bboxes_3d.tensor[:5].shape == (5, 7) assert torch.allclose(gt_bboxes_3d.tensor[:5], expected_gt_bboxes_3d, 1e-2) assert np.all(gt_labels.numpy() == expected_gt_labels) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask) assert original_classes == class_names scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=['cabinet', 'bed']) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 
'bed'] scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=('cabinet', 'bed')) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ('cabinet', 'bed') # Test load classes from file with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nbed\n') scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=path) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'bed'] def test_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) results = [] pred_boxes = dict() pred_boxes['boxes_3d'] = DepthInstance3DBoxes( torch.tensor([[ 1.4813e+00, 3.5207e+00, 1.5704e+00, 1.7445e+00, 2.3196e-01, 5.7235e-01, 0.0000e+00 ], [ 2.9040e+00, -3.4803e+00, 1.1911e+00, 6.6078e-01, 1.7072e-01, 6.7154e-01, 0.0000e+00 ], [ 1.1466e+00, 2.1987e+00, 9.2576e-03, 5.4184e-01, 2.5346e+00, 1.2145e+00, 0.0000e+00 ], [ 2.9168e+00, 2.5016e+00, 8.2875e-01, 6.1697e-01, 1.8428e+00, 2.8697e-01, 0.0000e+00 ], [ -3.3114e+00, -1.3351e-02, -8.9524e-03, 4.4082e-01, 3.8582e+00, 2.1603e+00, 0.0000e+00 ], [ -2.0135e+00, -3.4857e+00, 9.3848e-01, 1.9911e+00, 2.1603e-01, 1.2767e+00, 0.0000e+00 ], [ -2.1945e+00, -3.1402e+00, -3.8165e-02, 1.4801e+00, 6.8676e-01, 1.0586e+00, 0.0000e+00 ], [ -2.7553e+00, 2.4055e+00, -2.9972e-02, 1.4764e+00, 1.4927e+00, 2.3380e+00, 0.0000e+00 ]])) pred_boxes['labels_3d'] = torch.tensor([6, 6, 4, 9, 11, 11]) pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0, 1.0, 1.0, 0.5]) results.append(pred_boxes) metric = [0.25, 0.5] ret_dict = scannet_dataset.evaluate(results, metric) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 # test evaluate with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] ret_dict = scannet_dataset.evaluate( results, metric, pipeline=eval_pipeline) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 def test_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) boxes_3d = DepthInstance3DBoxes( torch.tensor([[ -2.4053e+00, 9.2295e-01, 8.0661e-02, 2.4054e+00, 2.1468e+00, 8.5990e-01, 0.0000e+00 ], [ -1.9341e+00, -2.0741e+00, 3.0698e-03, 3.2206e-01, 2.5322e-01, 3.5144e-01, 0.0000e+00 ], [ -3.6908e+00, 8.0684e-03, 2.6201e-01, 4.1515e-01, 7.6489e-01, 5.3585e-01, 0.0000e+00 ], [ 2.6332e+00, 8.5143e-01, -4.9964e-03, 3.0367e-01, 1.3448e+00, 1.8329e+00, 0.0000e+00 ], [ 2.0221e-02, 2.6153e+00, 1.5109e-02, 7.3335e-01, 1.0429e+00, 1.0251e+00, 0.0000e+00 
]])) scores_3d = torch.tensor( [1.2058e-04, 2.3012e-03, 6.2324e-06, 6.6139e-06, 6.7965e-05]) labels_3d = torch.tensor([0, 0, 0, 0, 0]) result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # show function with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') palette = [ [174, 199, 232], [152, 223, 138], [31, 119, 180], [255, 187, 120], [188, 189, 34], [140, 86, 75], [255, 152, 150], [214, 39, 40], [197, 176, 213], [148, 103, 189], [196, 156, 148], [23, 190, 207], [247, 182, 210], [219, 219, 141], [255, 127, 14], [158, 218, 229], [44, 160, 44], [112, 128, 144], [227, 119, 194], [82, 84, 163], ] scene_idxs = [0 for _ in range(20)] # test network inputs are (xyz, rgb, normalized_xyz) pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=True, enlarge_size=0.2, min_unique_num=None), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask'], meta_keys=['file_name', 'sample_idx']) ]
scannet_dataset = ScanNetSegDataset(
2
2023-12-21 12:50:35+00:00
16k
v3ucn/Bert-vits2-V2.2
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n self.empty_emo = torch.squeeze(\n torch.load(\"empty_emo.npy\", map_location=\"cpu\"), dim=1\n )\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, 
language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n if np.random.rand() > 0.1:\n emo = torch.squeeze(\n torch.load(audiopath.replace(\".wav\", \".emo.npy\"), map_location=\"cpu\"),\n dim=1,\n )\n else:\n emo = self.empty_emo\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert, emo)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.rand(1024, len(phone))\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.rand(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.rand(1024, len(phone))\n ja_bert = torch.rand(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, 
en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n emo = torch.FloatTensor(len(batch), 512)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n emo.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n emo[i, :] = row[9]\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n emo,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n self.n_speakers,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, loss_commit = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n 
neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n g,\n loss_commit,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, _ = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n 
fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 
2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import generator_loss, discriminator_loss, feature_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
10,900
"--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows,switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank)
5
2023-12-18 04:54:46+00:00
16k
m-abr/FCPCodebase
scripts/utils/Inv_Kinematics.py
[ { "identifier": "Base_Agent", "path": "agent/Base_Agent.py", "snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:\n\n self.radio = None # hear_message may be called during Server_Comm instantiation\n self.logger = Logger(enable_log, f\"{team_name}_{unum}\")\n self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)\n self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)\n self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)\n self.inv_kinematics = Inverse_Kinematics(self.world.robot)\n self.behavior = Behavior(self)\n self.path_manager = Path_Manager(self.world)\n self.radio = Radio(self.world, self.scom.commit_announcement)\n self.behavior.create_behaviors()\n Base_Agent.all_agents.append(self)\n\n @abstractmethod\n def think_and_send(self):\n pass\n\n def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:\n if direction != \"self\" and self.radio is not None:\n self.radio.receive(msg)\n\n def terminate(self):\n # close shared monitor socket if this is the last agent on this thread\n self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))\n Base_Agent.all_agents.remove(self)\n\n @staticmethod\n def terminate_all():\n for o in Base_Agent.all_agents:\n o.scom.close(True) # close shared monitor socket, if it exists\n Base_Agent.all_agents = []" }, { "identifier": "Inverse_Kinematics", "path": "math_ops/Inverse_Kinematics.py", "snippet": "class Inverse_Kinematics():\n\n # leg y deviation, upper leg height, upper leg depth, lower leg length, knee extra angle, max ankle z\n NAO_SPECS_PER_ROBOT = ((0.055, 0.12, 0.005, 0.1, atan(0.005/0.12), -0.091),\n (0.055, 0.13832, 0.005, 0.11832, atan(0.005/0.13832), -0.106),\n (0.055, 0.12, 0.005, 0.1, atan(0.005/0.12), -0.091),\n (0.072954143,0.147868424, 0.005, 0.127868424, atan(0.005/0.147868424), -0.114),\n (0.055, 0.12, 0.005, 0.1, atan(0.005/0.12), -0.091))\n\n TORSO_HIP_Z = 0.115 # distance in the z-axis, between the torso and each hip (same for all robots)\n TORSO_HIP_X = 0.01 # distance in the x-axis, between the torso and each hip (same for all robots) (hip is 0.01m to the back)\n\n def __init__(self, robot) -> None:\n self.robot = robot\n self.NAO_SPECS = Inverse_Kinematics.NAO_SPECS_PER_ROBOT[robot.type]\n\n def torso_to_hip_transform(self, coords, is_batch=False):\n '''\n Convert cartesian coordinates that are relative to torso to coordinates that are relative the center of both hip joints\n \n Parameters\n ----------\n coords : array_like\n One 3D position or list of 3D positions\n is_batch : `bool`\n Indicates if coords is a batch of 3D positions\n\n Returns\n -------\n coord : `list` or ndarray\n A numpy array is returned if is_batch is False, otherwise, a list of arrays is returned \n '''\n if is_batch:\n return [c + (Inverse_Kinematics.TORSO_HIP_X, 0, Inverse_Kinematics.TORSO_HIP_Z) for c in coords]\n else:\n return coords + (Inverse_Kinematics.TORSO_HIP_X, 0, Inverse_Kinematics.TORSO_HIP_Z)\n \n\n def head_to_hip_transform(self, coords, is_batch=False):\n '''\n Convert cartesian coordinates that are relative to head to coordinates that are relative the center of both 
hip joints\n \n Parameters\n ----------\n coords : array_like\n One 3D position or list of 3D positions\n is_batch : `bool`\n Indicates if coords is a batch of 3D positions\n\n Returns\n -------\n coord : `list` or ndarray\n A numpy array is returned if is_batch is False, otherwise, a list of arrays is returned \n '''\n coords_rel_torso = self.robot.head_to_body_part_transform( \"torso\", coords, is_batch )\n return self.torso_to_hip_transform(coords_rel_torso, is_batch)\n\n def get_body_part_pos_relative_to_hip(self, body_part_name):\n ''' Get body part position relative to the center of both hip joints '''\n bp_rel_head = self.robot.body_parts[body_part_name].transform.get_translation()\n return self.head_to_hip_transform(bp_rel_head)\n\n def get_ankle_pos_relative_to_hip(self, is_left):\n ''' Internally calls get_body_part_pos_relative_to_hip() '''\n return self.get_body_part_pos_relative_to_hip(\"lankle\" if is_left else \"rankle\")\n\n def get_linear_leg_trajectory(self, is_left:bool, p1, p2=None, foot_ori3d=(0,0,0), dynamic_pose:bool=True, resolution=100):\n ''' \n Compute leg trajectory so that the ankle moves linearly between two 3D points (relative to hip) \n \n Parameters\n ----------\n is_left : `bool`\n set to True to select left leg, False to select right leg\n p1 : array_like, length 3\n if p2 is None: \n p1 is the target position (relative to hip), and the initial point is given by the ankle's current position\n if p2 is not None: \n p1 is the initial point (relative to hip)\n p2 : array_like, length 3 / `None`\n target position (relative to hip) or None (see p1)\n foot_ori3d : array_like, length 3\n rotation around x,y,z (rotation around x & y are biases, relative to a vertical pose, or dynamic pose, if enabled)\n dynamic_pose : `bool`\n enable dynamic feet rotation to be parallel to the ground, based on IMU\n resolution : int\n interpolation resolution; more resolution is always better, but it takes more time to compute;\n having more points does not make the movement slower, because if there are excessive points they are removed\n during the analytical optimization\n\n Returns\n -------\n trajecory : `tuple`\n indices, [[values_1,error_codes_1], [values_2,error_codes_2], ...]\n See leg() for further details\n '''\n\n if p2 is None:\n p2 = np.asarray(p1, float)\n p1 = self.get_body_part_pos_relative_to_hip('lankle' if is_left else 'rankle')\n else:\n p1 = np.asarray(p1, float)\n p2 = np.asarray(p2, float)\n\n vec = (p2 - p1) / resolution\n\n\n hip_points = [p1 + vec * i for i in range(1,resolution+1)]\n interpolation = [self.leg(p, foot_ori3d, is_left, dynamic_pose) for p in hip_points]\n\n indices = [2,4,6,8,10,12] if is_left else [3,5,7,9,11,13]\n\n last_joint_values = self.robot.joints_position[indices[0:4]] #exclude feet joints to compute ankle trajectory\n next_step = interpolation[0]\n trajectory = []\n\n for p in interpolation[1:-1]:\n if np.any(np.abs(p[1][0:4]-last_joint_values) > 7.03): \n trajectory.append(next_step[1:3])\n last_joint_values = next_step[1][0:4]\n next_step = p\n else:\n next_step = p\n\n trajectory.append(interpolation[-1][1:3])\n\n return indices, trajectory\n\n\n\n def leg(self, ankle_pos3d, foot_ori3d, is_left:bool, dynamic_pose:bool):\n '''\n Compute inverse kinematics for the leg, considering as input the relative 3D position of the ankle and 3D orientation* of the foot\n *the yaw can be controlled directly, but the pitch and roll are biases (see below)\n\n Parameters\n ----------\n ankle_pos3d : array_like, length 3\n (x,y,z) position of 
ankle in 3D, relative to the center of both hip joints\n foot_ori3d : array_like, length 3\n rotation around x,y,z (rotation around x & y are biases, relative to a vertical pose, or dynamic pose, if enabled)\n is_left : `bool`\n set to True to select left leg, False to select right leg\n dynamic_pose : `bool`\n enable dynamic feet rotation to be parallel to the ground, based on IMU\n\n Returns\n -------\n indices : `list`\n indices of computed joints\n values : `list`\n values of computed joints\n error_codes : `list`\n list of error codes\n Error codes:\n (-1) Foot is too far (unreachable)\n (x) Joint x is out of range\n '''\n\n error_codes = []\n leg_y_dev, upper_leg_height, upper_leg_depth, lower_leg_len, knee_extra_angle, _ = self.NAO_SPECS\n sign = -1 if is_left else 1\n\n # Then we translate to origin of leg by shifting the y coordinate\n ankle_pos3d = np.asarray(ankle_pos3d) + (0,sign*leg_y_dev,0)\n\n # First we rotate the leg, then we rotate the coordinates to abstract from the rotation\n ankle_pos3d = Matrix_3x3().rotate_z_deg(-foot_ori3d[2]).multiply(ankle_pos3d)\n\n # Use geometric solution to compute knee angle and foot pitch\n dist = np.linalg.norm(ankle_pos3d) #dist hip <-> ankle\n sq_dist = dist * dist\n sq_upper_leg_h = upper_leg_height * upper_leg_height\n sq_lower_leg_l = lower_leg_len * lower_leg_len\n sq_upper_leg_l = upper_leg_depth * upper_leg_depth + sq_upper_leg_h\n upper_leg_len = sqrt(sq_upper_leg_l)\n knee = M.acos((sq_upper_leg_l + sq_lower_leg_l - sq_dist)/(2 * upper_leg_len * lower_leg_len)) + knee_extra_angle # Law of cosines\n foot = M.acos((sq_lower_leg_l + sq_dist - sq_upper_leg_l)/(2 * lower_leg_len * dist)) # foot perpendicular to vec(origin->ankle_pos)\n\n # Check if target is reachable\n if dist > upper_leg_len + lower_leg_len: \n error_codes.append(-1)\n\n # Knee and foot\n knee_angle = pi - knee\n foot_pitch = foot - atan(ankle_pos3d[0] / np.linalg.norm(ankle_pos3d[1:3]))\n foot_roll = atan(ankle_pos3d[1] / min(-0.05, ankle_pos3d[2])) * -sign # avoid instability of foot roll (not relevant above -0.05m)\n\n # Raw hip angles if all joints were straightforward\n raw_hip_yaw = foot_ori3d[2]\n raw_hip_pitch = foot_pitch - knee_angle\n raw_hip_roll = -sign * foot_roll\n\n # Rotate 45deg due to yaw joint orientation, then rotate yaw, roll and pitch\n m = Matrix_3x3().rotate_y_rad(raw_hip_pitch).rotate_x_rad(raw_hip_roll).rotate_z_deg(raw_hip_yaw).rotate_x_deg(-45*sign)\n\n # Get actual hip angles considering the yaw joint orientation\n hip_roll = (pi/4) - (sign * asin(m.m[1,2])) #Add pi/4 due to 45deg rotation\n hip_pitch = - atan2(m.m[0,2],m.m[2,2])\n hip_yaw = sign * atan2(m.m[1,0],m.m[1,1])\n\n # Convert rad to deg\n values = np.array([hip_yaw,hip_roll,hip_pitch,-knee_angle,foot_pitch,foot_roll]) * 57.2957795 #rad to deg\n\n # Set feet rotation bias (based on vertical pose, or dynamic_pose)\n values[4] -= foot_ori3d[1]\n values[5] -= foot_ori3d[0] * sign\n\n indices = [2,4,6,8,10,12] if is_left else [3,5,7,9,11,13]\n\n if dynamic_pose:\n\n # Rotation of torso in relation to foot\n m : Matrix_3x3 = Matrix_3x3.from_rotation_deg((self.robot.imu_torso_roll, self.robot.imu_torso_pitch, 0))\n m.rotate_z_deg(foot_ori3d[2], True)\n\n roll = m.get_roll_deg()\n pitch = m.get_pitch_deg()\n\n # Simple balance algorithm\n correction = 1 #correction to motivate a vertical torso (in degrees)\n roll = 0 if abs(roll) < correction else roll - np.copysign(correction,roll)\n pitch = 0 if abs(pitch) < correction else pitch - np.copysign(correction,pitch)\n \n values[4] += 
pitch\n values[5] += roll * sign\n\n\n # Check and limit range of joints\n for i in range(len(indices)):\n if values[i] < self.robot.joints_info[indices[i]].min or values[i] > self.robot.joints_info[indices[i]].max: \n error_codes.append(indices[i])\n values[i] = np.clip(values[i], self.robot.joints_info[indices[i]].min, self.robot.joints_info[indices[i]].max)\n\n\n return indices, values, error_codes" }, { "identifier": "Script", "path": "scripts/commons/Script.py", "snippet": "class Script():\n ROOT_DIR = path.dirname(path.dirname(realpath( join(getcwd(), dirname(__file__))) )) # project root directory\n\n def __init__(self, cpp_builder_unum=0) -> None:\n\n '''\n Arguments specification\n -----------------------\n - To add new arguments, edit the information below\n - After changing information below, the config.json file must be manually deleted\n - In other modules, these arguments can be accessed by their 1-letter ID\n '''\n # list of arguments: 1-letter ID, Description, Hardcoded default\n self.options = {'i': ('Server Hostname/IP', 'localhost'),\n 'p': ('Agent Port', '3100'),\n 'm': ('Monitor Port', '3200'),\n 't': ('Team Name', 'FCPortugal'),\n 'u': ('Uniform Number', '1'),\n 'r': ('Robot Type', '1'),\n 'P': ('Penalty Shootout', '0'),\n 'F': ('magmaFatProxy', '0'),\n 'D': ('Debug Mode', '1')}\n\n # list of arguments: 1-letter ID, data type, choices \n self.op_types = {'i': (str, None),\n 'p': (int, None),\n 'm': (int, None),\n 't': (str, None),\n 'u': (int, range(1,12)),\n 'r': (int, [0,1,2,3,4]),\n 'P': (int, [0,1]),\n 'F': (int, [0,1]),\n 'D': (int, [0,1])}\n \n '''\n End of arguments specification\n '''\n\n self.read_or_create_config()\n\n #advance help text position\n formatter = lambda prog: argparse.HelpFormatter(prog,max_help_position=52)\n parser = argparse.ArgumentParser(formatter_class=formatter)\n\n o = self.options\n t = self.op_types\n\n for id in self.options: # shorter metavar for aesthetic reasons\n parser.add_argument(f\"-{id}\", help=f\"{o[id][0]:30}[{o[id][1]:20}]\", type=t[id][0], nargs='?', default=o[id][1], metavar='X', choices=t[id][1])\n \n self.args = parser.parse_args()\n\n if getattr(sys, 'frozen', False): # disable debug mode when running from binary\n self.args.D = 0\n\n self.players = [] # list of created players\n\n Script.build_cpp_modules(exit_on_build = (cpp_builder_unum != 0 and cpp_builder_unum != self.args.u))\n\n if self.args.D:\n try:\n print(f\"\\nNOTE: for help run \\\"python {__main__.__file__} -h\\\"\")\n except:\n pass\n\n columns = [[],[],[]]\n for key, value in vars(self.args).items():\n columns[0].append(o[key][0])\n columns[1].append(o[key][1])\n columns[2].append(value)\n\n UI.print_table(columns, [\"Argument\",\"Default at /config.json\",\"Active\"], alignment=[\"<\",\"^\",\"^\"])\n\n\n def read_or_create_config(self) -> None:\n\n if not path.isfile('config.json'): # save hardcoded default values if file does not exist\n with open(\"config.json\", \"w\") as f:\n json.dump(self.options, f, indent=4)\n else: # load user-defined values (that can be overwritten by command-line arguments)\n if path.getsize(\"config.json\") == 0: # wait for possible write operation when launching multiple agents\n from time import sleep\n sleep(1)\n if path.getsize(\"config.json\") == 0: # abort after 1 second\n print(\"Aborting: 'config.json' is empty. 
Manually verify and delete if still empty.\")\n exit()\n \n with open(\"config.json\", \"r\") as f:\n self.options = json.loads(f.read())\n\n\n @staticmethod\n def build_cpp_modules(special_environment_prefix=[], exit_on_build=False):\n '''\n Build C++ modules in folder /cpp using Pybind11\n \n Parameters\n ----------\n special_environment_prefix : `list`\n command prefix to run a given command in the desired environment\n useful to compile C++ modules for different python interpreter versions (other than default version)\n Conda Env. example: ['conda', 'run', '-n', 'myEnv']\n If [] the default python interpreter is used as compilation target\n exit_on_build : bool\n exit if there is something to build (so that only 1 player per team builds c++ modules)\n '''\n cpp_path = Script.ROOT_DIR + \"/cpp/\"\n exclusions = [\"__pycache__\"]\n\n cpp_modules = [d for d in listdir(cpp_path) if isdir(join(cpp_path, d)) and d not in exclusions]\n\n if not cpp_modules: return #no modules to build\n\n python_cmd = f\"python{sys.version_info.major}.{sys.version_info.minor}\" # \"python3\" can select the wrong version, this prevents that\n\n def init():\n print(\"--------------------------\\nC++ modules:\",cpp_modules)\n\n try:\n process = subprocess.Popen(special_environment_prefix+[python_cmd, \"-m\", \"pybind11\", \"--includes\"], stdout=subprocess.PIPE)\n (includes, err) = process.communicate()\n process.wait()\n except:\n print(f\"Error while executing child program: '{python_cmd} -m pybind11 --includes'\")\n exit()\n\n includes = includes.decode().rstrip() # strip trailing newlines (and other whitespace chars)\n print(\"Using Pybind11 includes: '\",includes,\"'\",sep=\"\")\n return includes\n\n nproc = str(cpu_count())\n zero_modules = True\n\n for module in cpp_modules:\n module_path = join(cpp_path, module)\n\n # skip module if there is no Makefile (typical distribution case)\n if not isfile(join(module_path, \"Makefile\")):\n continue\n\n # skip module in certain conditions\n if isfile(join(module_path, module+\".so\")) and isfile(join(module_path, module+\".c_info\")):\n with open(join(module_path, module+\".c_info\"), 'rb') as f:\n info = pickle.load(f)\n if info == python_cmd:\n code_mod_time = max(getmtime(join(module_path, f)) for f in listdir(module_path) if f.endswith(\".cpp\") or f.endswith(\".h\"))\n bin_mod_time = getmtime(join(module_path, module+\".so\"))\n if bin_mod_time + 30 > code_mod_time: # favor not building with a margin of 30s (scenario: we unzip the fcpy project, including the binaries, the modification times are all similar)\n continue\n\n # init: print stuff & get Pybind11 includes\n if zero_modules:\n if exit_on_build:\n print(\"There are C++ modules to build. This player is not allowed to build. Aborting.\")\n exit()\n zero_modules = False\n includes = init()\n\n # build module\n print(f'{f\"Building: {module}... \":40}',end='',flush=True)\n process = subprocess.Popen(['make', '-j'+nproc, 'PYBIND_INCLUDES='+includes], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=module_path)\n (output, err) = process.communicate()\n exit_code = process.wait()\n if exit_code == 0:\n print(\"success!\")\n with open(join(module_path, module+\".c_info\"),\"wb\") as f: # save python version\n pickle.dump(python_cmd, f, protocol=4) # protocol 4 is backward compatible with Python 3.4\n else:\n print(\"Aborting! 
Building errors:\")\n print(output.decode(), err.decode())\n exit() \n\n if not zero_modules:\n print(\"All modules were built successfully!\\n--------------------------\")\n\n\n def batch_create(self, agent_cls, args_per_player): \n ''' Creates batch of agents '''\n\n for a in args_per_player:\n self.players.append( agent_cls(*a) )\n\n def batch_execute_agent(self, index : slice = slice(None)): \n ''' \n Executes agent normally (including commit & send)\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p in self.players[index]:\n p.think_and_send()\n\n def batch_execute_behavior(self, behavior, index : slice = slice(None)):\n '''\n Executes behavior\n\n Parameters\n ----------\n behavior : str\n name of behavior to execute\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.behavior.execute(behavior)\n\n def batch_commit_and_send(self, index : slice = slice(None)):\n '''\n Commits & sends data to server\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.scom.commit_and_send( p.world.robot.get_command() ) \n\n def batch_receive(self, index : slice = slice(None), update=True):\n ''' \n Waits for server messages\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n update : bool\n update world state based on information received from server\n if False, the agent becomes unaware of itself and its surroundings\n which is useful for reducing cpu resources for dummy agents in demonstrations\n '''\n for p in self.players[index]:\n p.scom.receive(update)\n\n def batch_commit_beam(self, pos2d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 2D position with a given rotation\n\n Parameters\n ----------\n pos2d_and_rotation : `list`\n iterable of 2D positions and rotations e.g. [(0,0,45),(-5,0,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos2d_and_rotation): \n p.scom.commit_beam(pos_rot[0:2],pos_rot[2])\n\n def batch_unofficial_beam(self, pos3d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 3D position with a given rotation\n\n Parameters\n ----------\n pos3d_and_rotation : `list`\n iterable of 3D positions and rotations e.g. [(0,0,0.5,45),(-5,0,0.5,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. 
index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos3d_and_rotation): \n p.scom.unofficial_beam(pos_rot[0:3],pos_rot[3])\n\n def batch_terminate(self, index : slice = slice(None)):\n '''\n Close all sockets connected to the agent port\n For scripts where the agent lives until the application ends, this is not needed\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.terminate()\n del self.players[index] # delete selection" }, { "identifier": "Draw", "path": "world/commons/Draw.py", "snippet": "class Draw():\n _socket = None\n\n def __init__(self, is_enabled:bool, unum:int, host:str, port:int) -> None:\n self.enabled = is_enabled \n self._is_team_right = None\n self._unum = unum \n self._prefix = f'?{unum}_'.encode() # temporary prefix that should never be used in normal circumstances\n \n #Create one socket for all instances\n if Draw._socket is None:\n Draw._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )\n Draw._socket.connect((host, port))\n Draw.clear_all()\n\n\n def set_team_side(self, is_right):\n ''' Called by world parser to switch side '''\n '''\n Generate an appropriate player ID\n RoboViz has a bug/feature: we send \"swap buffers for player: 'l_1' and RoboViz\n will swap every buffer that contains 'l_1' in the name, including \n 'l_10' and 'l_11'. To avoid that, we swap the separator to 'l-10', 'l-11'\n '''\n self._is_team_right = is_right\n self._prefix = f\"{'r' if is_right else 'l'}{'_' if self._unum < 10 else '-'}{self._unum}_\".encode() #e.g. b'l_5', b'l-10'\n\n\n @staticmethod\n def _send(msg, id, flush):\n ''' Private method to send message if RoboViz is accessible '''\n try:\n if flush:\n Draw._socket.send(msg + id + b'\\x00\\x00\\x00' + id + b'\\x00')\n else:\n Draw._socket.send(msg + id + b'\\x00')\n except ConnectionRefusedError:\n pass\n\n \n def circle(self, pos2d, radius, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw circle\n\n Examples\n ----------\n Circle in 2D (z=0): circle((-1,2), 3, 2, Draw.Color.red, \"my_circle\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos2d).any(), \"Argument 'pos2d' contains 'nan' values\"\n\n if self._is_team_right:\n pos2d = (-pos2d[0],-pos2d[1]) \n\n msg = b'\\x01\\x00' + (\n f'{f\"{pos2d[0] :.4f}\":.6s}'\n f'{f\"{pos2d[1] :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def line(self, p1, p2, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw line\n\n Examples\n ----------\n Line in 3D: line((0,0,0), (0,0,2), 3, Draw.Color.red, \"my_line\") \n Line in 2D (z=0): line((0,0), (0,1), 3, Draw.Color.red, \"my_line\") \n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(p1).any(), \"Argument 'p1' contains 'nan' values\"\n assert not np.isnan(p2).any(), \"Argument 'p2' contains 'nan' values\"\n\n z1 = p1[2] if len(p1)==3 else 0\n z2 = p2[2] if len(p2)==3 else 0\n\n if self._is_team_right: \n p1 = (-p1[0],-p1[1],p1[2]) if len(p1)==3 else (-p1[0],-p1[1])\n p2 = (-p2[0],-p2[1],p2[2]) if len(p2)==3 else (-p2[0],-p2[1])\n\n msg = b'\\x01\\x01' + (\n f'{f\"{p1[0] :.4f}\":.6s}'\n f'{f\"{p1[1] :.4f}\":.6s}'\n f'{f\"{z1 :.4f}\":.6s}'\n f'{f\"{p2[0] :.4f}\":.6s}'\n f'{f\"{p2[1] :.4f}\":.6s}'\n f'{f\"{z2 :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n\n Draw._send(msg, self._prefix + id.encode(), flush)\n \n\n def point(self, pos, size, color:bytes, id:str, flush=True):\n ''' \n Draw point\n\n Examples\n ----------\n Point in 3D: point((1,1,1), 3, Draw.Color.red, \"my_point\")\n Point in 2D (z=0): point((1,1), 3, Draw.Color.red, \"my_point\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x02' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{size :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def sphere(self, pos, radius, color:bytes, id:str, flush=True):\n ''' \n Draw sphere\n\n Examples\n ----------\n Sphere in 3D: sphere((1,1,1), 3, Draw.Color.red, \"my_sphere\")\n Sphere in 2D (z=0): sphere((1,1), 3, Draw.Color.red, \"my_sphere\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x03' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def polygon(self, vertices, color:bytes, alpha:int, id:str, flush=True):\n ''' \n Draw polygon\n\n Examples\n ----------\n Polygon in 3D: polygon(((0,0,0),(1,0,0),(0,1,0)), Draw.Color.red, 255, \"my_polygon\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert 0<=alpha<=255, \"The alpha channel (degree of opacity) must be in range [0,255]\"\n\n if self._is_team_right: \n vertices = [(-v[0],-v[1],v[2]) for v in vertices]\n\n msg = b'\\x01\\x04' + bytes([len(vertices)]) + color + alpha.to_bytes(1,'big')\n\n for v in vertices:\n msg += (\n f'{f\"{v[0] :.4f}\":.6s}'\n f'{f\"{v[1] :.4f}\":.6s}'\n f'{f\"{v[2] :.4f}\":.6s}').encode()\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def annotation(self, pos, text, color:bytes, id:str, flush=True):\n ''' \n Draw annotation\n\n Examples\n ----------\n Annotation in 3D: annotation((1,1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n Annotation in 2D (z=0): annotation((1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n '''\n if not self.enabled: return\n if type(text) != bytes: text = str(text).encode()\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x02\\x00' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}').encode() + color + text + b'\\x00'\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n \n def arrow(self, p1, p2, arrowhead_size, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw arrow\n\n Examples\n ----------\n Arrow in 3D: arrow((0,0,0), (0,0,2), 0.1, 3, Draw.Color.red, \"my_arrow\")\n Arrow in 2D (z=0): arrow((0,0), (0,1), 0.1, 3, Draw.Color.red, \"my_arrow\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n\n # No need to invert sides, the called shapes will handle that\n if len(p1)==2: p1 = M.to_3d(p1) \n else: p1 = np.asarray(p1)\n if len(p2)==2: p2 = M.to_3d(p2) \n else: p2 = np.asarray(p2)\n\n vec = p2-p1\n vec_size = np.linalg.norm(vec)\n if vec_size == 0: return #return without warning/error\n if arrowhead_size > vec_size: arrowhead_size = vec_size\n\n ground_proj_perpendicular = np.array([ vec[1], -vec[0], 0 ])\n\n if np.all(ground_proj_perpendicular == 0): #vertical arrow\n ground_proj_perpendicular = np.array([ arrowhead_size/2, 0, 0 ])\n else:\n ground_proj_perpendicular *= arrowhead_size/2 / np.linalg.norm(ground_proj_perpendicular)\n\n head_start = p2 - vec * (arrowhead_size/vec_size)\n head_pt1 = head_start + ground_proj_perpendicular\n head_pt2 = head_start - ground_proj_perpendicular\n\n self.line(p1,p2,thickness,color,id,False)\n self.line(p2,head_pt1,thickness,color,id,False)\n self.line(p2,head_pt2,thickness,color,id,flush)\n\n\n def flush(self, id):\n ''' Flush specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), False)\n\n def clear(self, id):\n ''' Clear specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), True) #swap buffer twice\n\n\n def clear_player(self):\n ''' Clear all drawings made by this player '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix, True) #swap buffer twice\n\n\n @staticmethod\n def clear_all():\n ''' Clear all drawings of all players '''\n if Draw._socket is not None:\n Draw._send(b'\\x00\\x00\\x00\\x00\\x00',b'',False) #swap buffer twice using no id\n\n\n class Color():\n '''\n Based on X11 colors\n The names are restructured to make better suggestions\n '''\n pink_violet = b'\\xC7\\x15\\x85'\n 
pink_hot = b'\\xFF\\x14\\x93'\n pink_violet_pale = b'\\xDB\\x70\\x93'\n pink = b'\\xFF\\x69\\xB4'\n pink_pale = b'\\xFF\\xB6\\xC1'\n \n red_dark = b'\\x8B\\x00\\x00'\n red = b'\\xFF\\x00\\x00'\n red_brick = b'\\xB2\\x22\\x22'\n red_crimson = b'\\xDC\\x14\\x3C'\n red_indian = b'\\xCD\\x5C\\x5C'\n red_salmon = b'\\xFA\\x80\\x72'\n\n orange_red = b'\\xFF\\x45\\x00'\n orange = b'\\xFF\\x8C\\x00'\n orange_ligth = b'\\xFF\\xA5\\x00'\n\n yellow_gold = b'\\xFF\\xD7\\x00'\n yellow = b'\\xFF\\xFF\\x00'\n yellow_light = b'\\xBD\\xB7\\x6B'\n\n brown_maroon =b'\\x80\\x00\\x00'\n brown_dark = b'\\x8B\\x45\\x13'\n brown = b'\\xA0\\x52\\x2D'\n brown_gold = b'\\xB8\\x86\\x0B'\n brown_light = b'\\xCD\\x85\\x3F'\n brown_pale = b'\\xDE\\xB8\\x87'\n\n green_dark = b'\\x00\\x64\\x00' \n green = b'\\x00\\x80\\x00' \n green_lime = b'\\x32\\xCD\\x32' \n green_light = b'\\x00\\xFF\\x00' \n green_lawn = b'\\x7C\\xFC\\x00' \n green_pale = b'\\x90\\xEE\\x90' \n\n cyan_dark = b'\\x00\\x80\\x80' \n cyan_medium = b'\\x00\\xCE\\xD1' \n cyan = b'\\x00\\xFF\\xFF' \n cyan_light = b'\\xAF\\xEE\\xEE'\n\n blue_dark = b'\\x00\\x00\\x8B' \n blue = b'\\x00\\x00\\xFF' \n blue_royal = b'\\x41\\x69\\xE1' \n blue_medium = b'\\x1E\\x90\\xFF' \n blue_light = b'\\x00\\xBF\\xFF'\n blue_pale = b'\\x87\\xCE\\xEB'\n\n purple_violet = b'\\x94\\x00\\xD3' \n purple_magenta = b'\\xFF\\x00\\xFF' \n purple_light = b'\\xBA\\x55\\xD3' \n purple_pale = b'\\xDD\\xA0\\xDD'\n\n white = b'\\xFF\\xFF\\xFF'\n gray_10 = b'\\xE6\\xE6\\xE6'\n gray_20 = b'\\xCC\\xCC\\xCC'\n gray_30 = b'\\xB2\\xB2\\xB2' \n gray_40 = b'\\x99\\x99\\x99'\n gray_50 = b'\\x80\\x80\\x80'\n gray_60 = b'\\x66\\x66\\x66'\n gray_70 = b'\\x4C\\x4C\\x4C'\n gray_80 = b'\\x33\\x33\\x33'\n gray_90 = b'\\x1A\\x1A\\x1A'\n black = b'\\x00\\x00\\x00' \n\n @staticmethod\n def get(r,g,b):\n ''' Get RGB color (0-255) '''\n return bytes([int(r),int(g),int(b)])" } ]
from agent.Base_Agent import Base_Agent as Agent
from itertools import count
from math_ops.Inverse_Kinematics import Inverse_Kinematics
from scripts.commons.Script import Script
from world.commons.Draw import Draw
import numpy as np
11392
class Inv_Kinematics():
    def __init__(self, script:Script) -> None:
        self.args = script.args
        self.last_action = (0,0,0)
        self.gravity = True

        # Initial pose is a neutral pose where all angles are 0
        leg_y_dev, upper_leg_height, upper_leg_depth, lower_leg_len, _, _ = Inverse_Kinematics.NAO_SPECS_PER_ROBOT[self.args.r]
        leg_height = upper_leg_height + lower_leg_len
        self.feet_pose = [ [[upper_leg_depth,leg_y_dev,-leg_height],[0,0,0]], [[upper_leg_depth,-leg_y_dev,-leg_height], [0,0,0]] ]

    def _user_control(self):
        while True:
            inp = input("Command:")
            if inp == "":
                return 2
            elif inp == ".":
                return 1
            elif inp == "h":
                self.print_help()
                continue
            elif inp == "g":
                self.gravity = not self.gravity
                print("Using gravity:",self.gravity)
                if self.gravity:
                    return 6  # extra steps for beam to take effect
                else:
                    return 1

            # Check if user input is a value
            try:
                val = float(inp)
                self.feet_pose[self.last_action[0]][self.last_action[1]][self.last_action[2]] = val
                continue
            except:
                pass

            if inp[0] not in ['l','r'] or inp[1] not in ['x','y','z','X','Y','Z']:
                print("Illegal command!")
                continue

            side = 0 if inp[0]=='l' else 1
            pos_rot = 0 if inp[1].islower() else 1
            axis = {'x':0,'y':1,'z':2}[inp[1].lower()]
            self.last_action = (side,pos_rot,axis)

            try:
                val = float(inp[2:])
                self.feet_pose[side][pos_rot][axis] = val
            except:
                print("Illegal value conversion!")
class Inv_Kinematics():
    def __init__(self, script:Script) -> None:
        self.args = script.args
        self.last_action = (0,0,0)
        self.gravity = True

        # Initial pose is a neutral pose where all angles are 0
        leg_y_dev, upper_leg_height, upper_leg_depth, lower_leg_len, _, _ = Inverse_Kinematics.NAO_SPECS_PER_ROBOT[self.args.r]
        leg_height = upper_leg_height + lower_leg_len
        self.feet_pose = [ [[upper_leg_depth,leg_y_dev,-leg_height],[0,0,0]], [[upper_leg_depth,-leg_y_dev,-leg_height], [0,0,0]] ]

    def _user_control(self):
        while True:
            inp = input("Command:")
            if inp == "":
                return 2
            elif inp == ".":
                return 1
            elif inp == "h":
                self.print_help()
                continue
            elif inp == "g":
                self.gravity = not self.gravity
                print("Using gravity:",self.gravity)
                if self.gravity:
                    return 6  # extra steps for beam to take effect
                else:
                    return 1

            # Check if user input is a value
            try:
                val = float(inp)
                self.feet_pose[self.last_action[0]][self.last_action[1]][self.last_action[2]] = val
                continue
            except:
                pass

            if inp[0] not in ['l','r'] or inp[1] not in ['x','y','z','X','Y','Z']:
                print("Illegal command!")
                continue

            side = 0 if inp[0]=='l' else 1
            pos_rot = 0 if inp[1].islower() else 1
            axis = {'x':0,'y':1,'z':2}[inp[1].lower()]
            self.last_action = (side,pos_rot,axis)

            try:
                val = float(inp[2:])
                self.feet_pose[side][pos_rot][axis] = val
            except:
                print("Illegal value conversion!")
def _draw_labels(self, player:Agent):
5
2023-12-16 23:40:23+00:00
16k
daihaojun554/biliscrapy
biliscrapy/network/bilibili_danmu.py
[ { "identifier": "bili_pb2", "path": "biliscrapy/network/protobuf/bili_pb2.py", "snippet": "DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\\n\\x08my.proto\\x12 bilibili.community.service.dm.v1\\\"d\\n\\x06\\x41vatar\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\x0b\\x61vatar_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.AvatarType\\\"#\\n\\x06\\x42ubble\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\\"\\xc6\\x01\\n\\x08\\x42ubbleV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\x0b\\x62ubble_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.BubbleType\\x12\\x15\\n\\rexposure_once\\x18\\x04 \\x01(\\x08\\x12\\x45\\n\\rexposure_type\\x18\\x05 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.ExposureType\\\"&\\n\\x06\\x42utton\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\x02 \\x01(\\x05\\\"X\\n\\x0e\\x42uzzwordConfig\\x12\\x46\\n\\x08keywords\\x18\\x01 \\x03(\\x0b\\x32\\x34.bilibili.community.service.dm.v1.BuzzwordShowConfig\\\"x\\n\\x12\\x42uzzwordShowConfig\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06schema\\x18\\x02 \\x01(\\t\\x12\\x0e\\n\\x06source\\x18\\x03 \\x01(\\x05\\x12\\n\\n\\x02id\\x18\\x04 \\x01(\\x03\\x12\\x13\\n\\x0b\\x62uzzword_id\\x18\\x05 \\x01(\\x03\\x12\\x13\\n\\x0bschema_type\\x18\\x06 \\x01(\\x05\\\"{\\n\\x08\\x43heckBox\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12<\\n\\x04type\\x18\\x02 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.CheckboxType\\x12\\x15\\n\\rdefault_value\\x18\\x03 \\x01(\\x08\\x12\\x0c\\n\\x04show\\x18\\x04 \\x01(\\x08\\\"?\\n\\nCheckBoxV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04type\\x18\\x02 \\x01(\\x05\\x12\\x15\\n\\rdefault_value\\x18\\x03 \\x01(\\x08\\\"\\x82\\x02\\n\\x0b\\x43lickButton\\x12\\x15\\n\\rportrait_text\\x18\\x01 \\x03(\\t\\x12\\x16\\n\\x0elandscape_text\\x18\\x02 \\x03(\\t\\x12\\x1b\\n\\x13portrait_text_focus\\x18\\x03 \\x03(\\t\\x12\\x1c\\n\\x14landscape_text_focus\\x18\\x04 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x05 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x0c\\n\\x04show\\x18\\x06 \\x01(\\x08\\x12\\x38\\n\\x06\\x62ubble\\x18\\x07 \\x01(\\x0b\\x32(.bilibili.community.service.dm.v1.Bubble\\\"\\xd5\\x01\\n\\rClickButtonV2\\x12\\x15\\n\\rportrait_text\\x18\\x01 \\x03(\\t\\x12\\x16\\n\\x0elandscape_text\\x18\\x02 \\x03(\\t\\x12\\x1b\\n\\x13portrait_text_focus\\x18\\x03 \\x03(\\t\\x12\\x1c\\n\\x14landscape_text_focus\\x18\\x04 \\x03(\\t\\x12\\x13\\n\\x0brender_type\\x18\\x05 \\x01(\\x05\\x12\\x17\\n\\x0ftext_input_post\\x18\\x06 \\x01(\\x08\\x12\\x15\\n\\rexposure_once\\x18\\x07 \\x01(\\x08\\x12\\x15\\n\\rexposure_type\\x18\\x08 \\x01(\\x05\\\"\\xa1\\x01\\n\\tCommandDm\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0b\\n\\x03mid\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ommand\\x18\\x04 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x05 \\x01(\\t\\x12\\x10\\n\\x08progress\\x18\\x06 \\x01(\\x05\\x12\\r\\n\\x05\\x63time\\x18\\x07 \\x01(\\t\\x12\\r\\n\\x05mtime\\x18\\x08 \\x01(\\t\\x12\\r\\n\\x05\\x65xtra\\x18\\t \\x01(\\t\\x12\\r\\n\\x05idStr\\x18\\n \\x01(\\t\\\"P\\n\\rDanmakuAIFlag\\x12?\\n\\x08\\x64m_flags\\x18\\x01 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuFlag\\\"\\xad\\x02\\n\\x0b\\x44\\x61nmakuElem\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x10\\n\\x08progress\\x18\\x02 
\\x01(\\x05\\x12\\x0c\\n\\x04mode\\x18\\x03 \\x01(\\x05\\x12\\x10\\n\\x08\\x66ontsize\\x18\\x04 \\x01(\\x05\\x12\\r\\n\\x05\\x63olor\\x18\\x05 \\x01(\\r\\x12\\x0f\\n\\x07midHash\\x18\\x06 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x07 \\x01(\\t\\x12\\r\\n\\x05\\x63time\\x18\\x08 \\x01(\\x03\\x12\\x0e\\n\\x06weight\\x18\\t \\x01(\\x05\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\n \\x01(\\t\\x12\\x0c\\n\\x04pool\\x18\\x0b \\x01(\\x05\\x12\\r\\n\\x05idStr\\x18\\x0c \\x01(\\t\\x12\\x0c\\n\\x04\\x61ttr\\x18\\r \\x01(\\x05\\x12\\x11\\n\\tanimation\\x18\\x16 \\x01(\\t\\x12\\x42\\n\\x08\\x63olorful\\x18\\x18 \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.DmColorfulType\\\")\\n\\x0b\\x44\\x61nmakuFlag\\x12\\x0c\\n\\x04\\x64mid\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04\\x66lag\\x18\\x02 \\x01(\\r\\\"K\\n\\x11\\x44\\x61nmakuFlagConfig\\x12\\x10\\n\\x08rec_flag\\x18\\x01 \\x01(\\x05\\x12\\x10\\n\\x08rec_text\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nrec_switch\\x18\\x03 \\x01(\\x05\\\"\\xe4\\x06\\n\\x18\\x44\\x61nmuDefaultPlayerConfig\\x12)\\n!player_danmaku_use_default_config\\x18\\x01 \\x01(\\x08\\x12,\\n$player_danmaku_ai_recommended_switch\\x18\\x04 \\x01(\\x08\\x12+\\n#player_danmaku_ai_recommended_level\\x18\\x05 \\x01(\\x05\\x12\\x1f\\n\\x17player_danmaku_blocktop\\x18\\x06 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockscroll\\x18\\x07 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockbottom\\x18\\x08 \\x01(\\x08\\x12$\\n\\x1cplayer_danmaku_blockcolorful\\x18\\t \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockrepeat\\x18\\n \\x01(\\x08\\x12#\\n\\x1bplayer_danmaku_blockspecial\\x18\\x0b \\x01(\\x08\\x12\\x1e\\n\\x16player_danmaku_opacity\\x18\\x0c \\x01(\\x02\\x12$\\n\\x1cplayer_danmaku_scalingfactor\\x18\\r \\x01(\\x02\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\x12\\x1c\\n\\x14player_danmaku_speed\\x18\\x0f \\x01(\\x05\\x12$\\n\\x1cinline_player_danmaku_switch\\x18\\x10 \\x01(\\x08\\x12)\\n!player_danmaku_senior_mode_switch\\x18\\x11 \\x01(\\x05\\x12.\\n&player_danmaku_ai_recommended_level_v2\\x18\\x12 \\x01(\\x05\\x12\\x98\\x01\\n*player_danmaku_ai_recommended_level_v2_map\\x18\\x13 \\x03(\\x0b\\x32\\x64.bilibili.community.service.dm.v1.DanmuDefaultPlayerConfig.PlayerDanmakuAiRecommendedLevelV2MapEntry\\x1aK\\n)PlayerDanmakuAiRecommendedLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"\\x8f\\x08\\n\\x11\\x44\\x61nmuPlayerConfig\\x12\\x1d\\n\\x15player_danmaku_switch\\x18\\x01 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_switch_save\\x18\\x02 \\x01(\\x08\\x12)\\n!player_danmaku_use_default_config\\x18\\x03 \\x01(\\x08\\x12,\\n$player_danmaku_ai_recommended_switch\\x18\\x04 \\x01(\\x08\\x12+\\n#player_danmaku_ai_recommended_level\\x18\\x05 \\x01(\\x05\\x12\\x1f\\n\\x17player_danmaku_blocktop\\x18\\x06 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockscroll\\x18\\x07 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockbottom\\x18\\x08 \\x01(\\x08\\x12$\\n\\x1cplayer_danmaku_blockcolorful\\x18\\t \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockrepeat\\x18\\n \\x01(\\x08\\x12#\\n\\x1bplayer_danmaku_blockspecial\\x18\\x0b \\x01(\\x08\\x12\\x1e\\n\\x16player_danmaku_opacity\\x18\\x0c \\x01(\\x02\\x12$\\n\\x1cplayer_danmaku_scalingfactor\\x18\\r \\x01(\\x02\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\x12\\x1c\\n\\x14player_danmaku_speed\\x18\\x0f \\x01(\\x05\\x12&\\n\\x1eplayer_danmaku_enableblocklist\\x18\\x10 \\x01(\\x08\\x12$\\n\\x1cinline_player_danmaku_switch\\x18\\x11 
\\x01(\\x08\\x12$\\n\\x1cinline_player_danmaku_config\\x18\\x12 \\x01(\\x05\\x12&\\n\\x1eplayer_danmaku_ios_switch_save\\x18\\x13 \\x01(\\x05\\x12)\\n!player_danmaku_senior_mode_switch\\x18\\x14 \\x01(\\x05\\x12.\\n&player_danmaku_ai_recommended_level_v2\\x18\\x15 \\x01(\\x05\\x12\\x91\\x01\\n*player_danmaku_ai_recommended_level_v2_map\\x18\\x16 \\x03(\\x0b\\x32].bilibili.community.service.dm.v1.DanmuPlayerConfig.PlayerDanmakuAiRecommendedLevelV2MapEntry\\x1aK\\n)PlayerDanmakuAiRecommendedLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"0\\n\\x16\\x44\\x61nmuPlayerConfigPanel\\x12\\x16\\n\\x0eselection_text\\x18\\x01 \\x01(\\t\\\"K\\n\\x18\\x44\\x61nmuPlayerDynamicConfig\\x12\\x10\\n\\x08progress\\x18\\x01 \\x01(\\x05\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\\"\\x90\\x03\\n\\x15\\x44\\x61nmuPlayerViewConfig\\x12\\x61\\n\\x1d\\x64\\x61nmuku_default_player_config\\x18\\x01 \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.DanmuDefaultPlayerConfig\\x12R\\n\\x15\\x64\\x61nmuku_player_config\\x18\\x02 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmuPlayerConfig\\x12\\x61\\n\\x1d\\x64\\x61nmuku_player_dynamic_config\\x18\\x03 \\x03(\\x0b\\x32:.bilibili.community.service.dm.v1.DanmuPlayerDynamicConfig\\x12]\\n\\x1b\\x64\\x61nmuku_player_config_panel\\x18\\x04 \\x01(\\x0b\\x32\\x38.bilibili.community.service.dm.v1.DanmuPlayerConfigPanel\\\"\\xd8\\x04\\n\\x14\\x44\\x61nmuWebPlayerConfig\\x12\\x11\\n\\tdm_switch\\x18\\x01 \\x01(\\x08\\x12\\x11\\n\\tai_switch\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08\\x61i_level\\x18\\x03 \\x01(\\x05\\x12\\x10\\n\\x08\\x62locktop\\x18\\x04 \\x01(\\x08\\x12\\x13\\n\\x0b\\x62lockscroll\\x18\\x05 \\x01(\\x08\\x12\\x13\\n\\x0b\\x62lockbottom\\x18\\x06 \\x01(\\x08\\x12\\x12\\n\\nblockcolor\\x18\\x07 \\x01(\\x08\\x12\\x14\\n\\x0c\\x62lockspecial\\x18\\x08 \\x01(\\x08\\x12\\x14\\n\\x0cpreventshade\\x18\\t \\x01(\\x08\\x12\\r\\n\\x05\\x64mask\\x18\\n \\x01(\\x08\\x12\\x0f\\n\\x07opacity\\x18\\x0b \\x01(\\x02\\x12\\x0e\\n\\x06\\x64marea\\x18\\x0c \\x01(\\x05\\x12\\x11\\n\\tspeedplus\\x18\\r \\x01(\\x02\\x12\\x10\\n\\x08\\x66ontsize\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nscreensync\\x18\\x0f \\x01(\\x08\\x12\\x11\\n\\tspeedsync\\x18\\x10 \\x01(\\x08\\x12\\x12\\n\\nfontfamily\\x18\\x11 \\x01(\\t\\x12\\x0c\\n\\x04\\x62old\\x18\\x12 \\x01(\\x08\\x12\\x12\\n\\nfontborder\\x18\\x13 \\x01(\\x05\\x12\\x11\\n\\tdraw_type\\x18\\x14 \\x01(\\t\\x12\\x1a\\n\\x12senior_mode_switch\\x18\\x15 \\x01(\\x05\\x12\\x13\\n\\x0b\\x61i_level_v2\\x18\\x16 \\x01(\\x05\\x12\\x61\\n\\x0f\\x61i_level_v2_map\\x18\\x17 \\x03(\\x0b\\x32H.bilibili.community.service.dm.v1.DanmuWebPlayerConfig.AiLevelV2MapEntry\\x1a\\x33\\n\\x11\\x41iLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"Y\\n\\nDmColorful\\x12>\\n\\x04type\\x18\\x01 \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.DmColorfulType\\x12\\x0b\\n\\x03src\\x18\\x02 \\x01(\\t\\\"A\\n\\x0f\\x44mExpoReportReq\\x12\\x12\\n\\nsession_id\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\r\\n\\x05spmid\\x18\\x04 \\x01(\\t\\\"\\x11\\n\\x0f\\x44mExpoReportRes\\\"\\xe3\\x0c\\n\\x11\\x44mPlayerConfigReq\\x12\\n\\n\\x02ts\\x18\\x01 \\x01(\\x03\\x12\\x45\\n\\x06switch\\x18\\x02 \\x01(\\x0b\\x32\\x35.bilibili.community.service.dm.v1.PlayerDanmakuSwitch\\x12N\\n\\x0bswitch_save\\x18\\x03 
\\x01(\\x0b\\x32\\x39.bilibili.community.service.dm.v1.PlayerDanmakuSwitchSave\\x12[\\n\\x12use_default_config\\x18\\x04 \\x01(\\x0b\\x32?.bilibili.community.service.dm.v1.PlayerDanmakuUseDefaultConfig\\x12\\x61\\n\\x15\\x61i_recommended_switch\\x18\\x05 \\x01(\\x0b\\x32\\x42.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedSwitch\\x12_\\n\\x14\\x61i_recommended_level\\x18\\x06 \\x01(\\x0b\\x32\\x41.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedLevel\\x12I\\n\\x08\\x62locktop\\x18\\x07 \\x01(\\x0b\\x32\\x37.bilibili.community.service.dm.v1.PlayerDanmakuBlocktop\\x12O\\n\\x0b\\x62lockscroll\\x18\\x08 \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockscroll\\x12O\\n\\x0b\\x62lockbottom\\x18\\t \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockbottom\\x12S\\n\\rblockcolorful\\x18\\n \\x01(\\x0b\\x32<.bilibili.community.service.dm.v1.PlayerDanmakuBlockcolorful\\x12O\\n\\x0b\\x62lockrepeat\\x18\\x0b \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockrepeat\\x12Q\\n\\x0c\\x62lockspecial\\x18\\x0c \\x01(\\x0b\\x32;.bilibili.community.service.dm.v1.PlayerDanmakuBlockspecial\\x12G\\n\\x07opacity\\x18\\r \\x01(\\x0b\\x32\\x36.bilibili.community.service.dm.v1.PlayerDanmakuOpacity\\x12S\\n\\rscalingfactor\\x18\\x0e \\x01(\\x0b\\x32<.bilibili.community.service.dm.v1.PlayerDanmakuScalingfactor\\x12\\x45\\n\\x06\\x64omain\\x18\\x0f \\x01(\\x0b\\x32\\x35.bilibili.community.service.dm.v1.PlayerDanmakuDomain\\x12\\x43\\n\\x05speed\\x18\\x10 \\x01(\\x0b\\x32\\x34.bilibili.community.service.dm.v1.PlayerDanmakuSpeed\\x12W\\n\\x0f\\x65nableblocklist\\x18\\x11 \\x01(\\x0b\\x32>.bilibili.community.service.dm.v1.PlayerDanmakuEnableblocklist\\x12^\\n\\x19inlinePlayerDanmakuSwitch\\x18\\x12 \\x01(\\x0b\\x32;.bilibili.community.service.dm.v1.InlinePlayerDanmakuSwitch\\x12[\\n\\x12senior_mode_switch\\x18\\x13 \\x01(\\x0b\\x32?.bilibili.community.service.dm.v1.PlayerDanmakuSeniorModeSwitch\\x12\\x64\\n\\x17\\x61i_recommended_level_v2\\x18\\x14 \\x01(\\x0b\\x32\\x43.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedLevelV2\\\"/\\n\\x0b\\x44mSegConfig\\x12\\x11\\n\\tpage_size\\x18\\x01 \\x01(\\x03\\x12\\r\\n\\x05total\\x18\\x02 \\x01(\\x03\\\"\\xe4\\x01\\n\\x10\\x44mSegMobileReply\\x12<\\n\\x05\\x65lems\\x18\\x01 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\x12\\r\\n\\x05state\\x18\\x02 \\x01(\\x05\\x12@\\n\\x07\\x61i_flag\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.DanmakuAIFlag\\x12\\x41\\n\\x0b\\x63olorfulSrc\\x18\\x05 \\x03(\\x0b\\x32,.bilibili.community.service.dm.v1.DmColorful\\\"\\xa6\\x01\\n\\x0e\\x44mSegMobileReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\x12\\x16\\n\\x0eteenagers_mode\\x18\\x05 \\x01(\\x05\\x12\\n\\n\\x02ps\\x18\\x06 \\x01(\\x03\\x12\\n\\n\\x02pe\\x18\\x07 \\x01(\\x03\\x12\\x11\\n\\tpull_mode\\x18\\x08 \\x01(\\x05\\x12\\x12\\n\\nfrom_scene\\x18\\t \\x01(\\x05\\\"]\\n\\rDmSegOttReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12<\\n\\x05\\x65lems\\x18\\x02 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\\"L\\n\\x0b\\x44mSegOttReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\\"]\\n\\rDmSegSDKReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12<\\n\\x05\\x65lems\\x18\\x02 
\\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\\"L\\n\\x0b\\x44mSegSDKReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\\"\\xde\\x06\\n\\x0b\\x44mViewReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12\\x39\\n\\x04mask\\x18\\x02 \\x01(\\x0b\\x32+.bilibili.community.service.dm.v1.VideoMask\\x12\\x41\\n\\x08subtitle\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.VideoSubtitle\\x12\\x13\\n\\x0bspecial_dms\\x18\\x04 \\x03(\\t\\x12\\x44\\n\\x07\\x61i_flag\\x18\\x05 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmakuFlagConfig\\x12N\\n\\rplayer_config\\x18\\x06 \\x01(\\x0b\\x32\\x37.bilibili.community.service.dm.v1.DanmuPlayerViewConfig\\x12\\x16\\n\\x0esend_box_style\\x18\\x07 \\x01(\\x05\\x12\\r\\n\\x05\\x61llow\\x18\\x08 \\x01(\\x08\\x12\\x11\\n\\tcheck_box\\x18\\t \\x01(\\t\\x12\\x1a\\n\\x12\\x63heck_box_show_msg\\x18\\n \\x01(\\t\\x12\\x18\\n\\x10text_placeholder\\x18\\x0b \\x01(\\t\\x12\\x19\\n\\x11input_placeholder\\x18\\x0c \\x01(\\t\\x12\\x1d\\n\\x15report_filter_content\\x18\\r \\x03(\\t\\x12\\x41\\n\\x0b\\x65xpo_report\\x18\\x0e \\x01(\\x0b\\x32,.bilibili.community.service.dm.v1.ExpoReport\\x12I\\n\\x0f\\x62uzzword_config\\x18\\x0f \\x01(\\x0b\\x32\\x30.bilibili.community.service.dm.v1.BuzzwordConfig\\x12\\x42\\n\\x0b\\x65xpressions\\x18\\x10 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.Expressions\\x12?\\n\\npost_panel\\x18\\x11 \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.PostPanel\\x12\\x15\\n\\ractivity_meta\\x18\\x12 \\x03(\\t\\x12\\x42\\n\\x0bpost_panel2\\x18\\x13 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.PostPanelV2\\\"X\\n\\tDmViewReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05spmid\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0cis_hard_boot\\x18\\x05 \\x01(\\x05\\\"\\xc4\\x04\\n\\x0e\\x44mWebViewReply\\x12\\r\\n\\x05state\\x18\\x01 \\x01(\\x05\\x12\\x0c\\n\\x04text\\x18\\x02 \\x01(\\t\\x12\\x11\\n\\ttext_side\\x18\\x03 \\x01(\\t\\x12=\\n\\x06\\x64m_sge\\x18\\x04 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.DmSegConfig\\x12\\x41\\n\\x04\\x66lag\\x18\\x05 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmakuFlagConfig\\x12\\x13\\n\\x0bspecial_dms\\x18\\x06 \\x03(\\t\\x12\\x11\\n\\tcheck_box\\x18\\x07 \\x01(\\x08\\x12\\r\\n\\x05\\x63ount\\x18\\x08 \\x01(\\x03\\x12?\\n\\ncommandDms\\x18\\t \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.CommandDm\\x12M\\n\\rplayer_config\\x18\\n \\x01(\\x0b\\x32\\x36.bilibili.community.service.dm.v1.DanmuWebPlayerConfig\\x12\\x1d\\n\\x15report_filter_content\\x18\\x0b \\x03(\\t\\x12\\x42\\n\\x0b\\x65xpressions\\x18\\x0c \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.Expressions\\x12?\\n\\npost_panel\\x18\\r \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.PostPanel\\x12\\x15\\n\\ractivity_meta\\x18\\x0e \\x03(\\t\\\"*\\n\\nExpoReport\\x12\\x1c\\n\\x14should_report_at_end\\x18\\x01 \\x01(\\x08\\\"d\\n\\nExpression\\x12\\x0f\\n\\x07keyword\\x18\\x01 \\x03(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x38\\n\\x06period\\x18\\x03 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Period\\\"I\\n\\x0b\\x45xpressions\\x12:\\n\\x04\\x64\\x61ta\\x18\\x01 \\x03(\\x0b\\x32,.bilibili.community.service.dm.v1.Expression\\\"*\\n\\x19InlinePlayerDanmakuSwitch\\x12\\r\\n\\x05value\\x18\\x01 
\\x01(\\x08\\\"\\'\\n\\x05Label\\x12\\r\\n\\x05title\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x02 \\x03(\\t\\\"W\\n\\x07LabelV2\\x12\\r\\n\\x05title\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x02 \\x03(\\t\\x12\\x15\\n\\rexposure_once\\x18\\x03 \\x01(\\x08\\x12\\x15\\n\\rexposure_type\\x18\\x04 \\x01(\\x05\\\"$\\n\\x06Period\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\\"0\\n\\x1fPlayerDanmakuAiRecommendedLevel\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"2\\n!PlayerDanmakuAiRecommendedLevelV2\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"1\\n PlayerDanmakuAiRecommendedSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockbottom\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"+\\n\\x1aPlayerDanmakuBlockcolorful\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockrepeat\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockscroll\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"*\\n\\x19PlayerDanmakuBlockspecial\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"&\\n\\x15PlayerDanmakuBlocktop\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"$\\n\\x13PlayerDanmakuDomain\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\"-\\n\\x1cPlayerDanmakuEnableblocklist\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"%\\n\\x14PlayerDanmakuOpacity\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\"+\\n\\x1aPlayerDanmakuScalingfactor\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\".\\n\\x1dPlayerDanmakuSeniorModeSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"#\\n\\x12PlayerDanmakuSpeed\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"8\\n\\x13PlayerDanmakuSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\x12\\x12\\n\\ncan_ignore\\x18\\x02 \\x01(\\x08\\\"(\\n\\x17PlayerDanmakuSwitchSave\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\".\\n\\x1dPlayerDanmakuUseDefaultConfig\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"\\x8c\\x03\\n\\tPostPanel\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\x12\\x10\\n\\x08priority\\x18\\x03 \\x01(\\x03\\x12\\x0e\\n\\x06\\x62iz_id\\x18\\x04 \\x01(\\x03\\x12\\x44\\n\\x08\\x62iz_type\\x18\\x05 \\x01(\\x0e\\x32\\x32.bilibili.community.service.dm.v1.PostPanelBizType\\x12\\x43\\n\\x0c\\x63lick_button\\x18\\x06 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.ClickButton\\x12?\\n\\ntext_input\\x18\\x07 \\x01(\\x0b\\x32+.bilibili.community.service.dm.v1.TextInput\\x12=\\n\\tcheck_box\\x18\\x08 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.CheckBox\\x12\\x36\\n\\x05toast\\x18\\t \\x01(\\x0b\\x32\\'.bilibili.community.service.dm.v1.Toast\\\"\\xcb\\x03\\n\\x0bPostPanelV2\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\x12\\x10\\n\\x08\\x62iz_type\\x18\\x03 \\x01(\\x05\\x12\\x45\\n\\x0c\\x63lick_button\\x18\\x04 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.ClickButtonV2\\x12\\x41\\n\\ntext_input\\x18\\x05 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.TextInputV2\\x12?\\n\\tcheck_box\\x18\\x06 \\x01(\\x0b\\x32,.bilibili.community.service.dm.v1.CheckBoxV2\\x12\\x38\\n\\x05toast\\x18\\x07 \\x01(\\x0b\\x32).bilibili.community.service.dm.v1.ToastV2\\x12:\\n\\x06\\x62ubble\\x18\\x08 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.BubbleV2\\x12\\x38\\n\\x05label\\x18\\t \\x01(\\x0b\\x32).bilibili.community.service.dm.v1.LabelV2\\x12\\x13\\n\\x0bpost_status\\x18\\n \\x01(\\x05\\\")\\n\\x08Response\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 
\\x01(\\x05\\x12\\x0f\\n\\x07message\\x18\\x02 \\x01(\\t\\\"\\xf9\\x02\\n\\x0cSubtitleItem\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x0e\\n\\x06id_str\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03lan\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07lan_doc\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0csubtitle_url\\x18\\x05 \\x01(\\t\\x12:\\n\\x06\\x61uthor\\x18\\x06 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.UserInfo\\x12<\\n\\x04type\\x18\\x07 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.SubtitleType\\x12\\x15\\n\\rlan_doc_brief\\x18\\x08 \\x01(\\t\\x12\\x41\\n\\x07\\x61i_type\\x18\\t \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.SubtitleAiType\\x12\\x45\\n\\tai_status\\x18\\n \\x01(\\x0e\\x32\\x32.bilibili.community.service.dm.v1.SubtitleAiStatus\\\"\\xe8\\x02\\n\\tTextInput\\x12\\x1c\\n\\x14portrait_placeholder\\x18\\x01 \\x03(\\t\\x12\\x1d\\n\\x15landscape_placeholder\\x18\\x02 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x18\\n\\x10placeholder_post\\x18\\x04 \\x01(\\x08\\x12\\x0c\\n\\x04show\\x18\\x05 \\x01(\\x08\\x12\\x38\\n\\x06\\x61vatar\\x18\\x06 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Avatar\\x12\\x41\\n\\x0bpost_status\\x18\\x07 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.PostStatus\\x12\\x36\\n\\x05label\\x18\\x08 \\x01(\\x0b\\x32\\'.bilibili.community.service.dm.v1.Label\\\"\\xfb\\x01\\n\\x0bTextInputV2\\x12\\x1c\\n\\x14portrait_placeholder\\x18\\x01 \\x03(\\t\\x12\\x1d\\n\\x15landscape_placeholder\\x18\\x02 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x18\\n\\x10placeholder_post\\x18\\x04 \\x01(\\x08\\x12\\x38\\n\\x06\\x61vatar\\x18\\x05 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Avatar\\x12\\x18\\n\\x10text_input_limit\\x18\\x06 \\x01(\\x05\\\"o\\n\\x05Toast\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x64uration\\x18\\x02 \\x01(\\x05\\x12\\x0c\\n\\x04show\\x18\\x03 \\x01(\\x08\\x12\\x38\\n\\x06\\x62utton\\x18\\x04 \\x01(\\x0b\\x32(.bilibili.community.service.dm.v1.Button\\\"-\\n\\rToastButtonV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\x02 \\x01(\\x05\\\"s\\n\\x07ToastV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x64uration\\x18\\x02 \\x01(\\x05\\x12H\\n\\x0ftoast_button_v2\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.ToastButtonV2\\\"\\\\\\n\\x08UserInfo\\x12\\x0b\\n\\x03mid\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04name\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03sex\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04\\x66\\x61\\x63\\x65\\x18\\x04 \\x01(\\t\\x12\\x0c\\n\\x04sign\\x18\\x05 \\x01(\\t\\x12\\x0c\\n\\x04rank\\x18\\x06 \\x01(\\x05\\\"S\\n\\tVideoMask\\x12\\x0b\\n\\x03\\x63id\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04plat\\x18\\x02 \\x01(\\x05\\x12\\x0b\\n\\x03\\x66ps\\x18\\x03 \\x01(\\x05\\x12\\x0c\\n\\x04time\\x18\\x04 \\x01(\\x03\\x12\\x10\\n\\x08mask_url\\x18\\x05 \\x01(\\t\\\"o\\n\\rVideoSubtitle\\x12\\x0b\\n\\x03lan\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06lanDoc\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\tsubtitles\\x18\\x03 
\\x03(\\x0b\\x32..bilibili.community.service.dm.v1.SubtitleItem*3\\n\\nAvatarType\\x12\\x12\\n\\x0e\\x41vatarTypeNone\\x10\\x00\\x12\\x11\\n\\rAvatarTypeNFT\\x10\\x01*Y\\n\\nBubbleType\\x12\\x12\\n\\x0e\\x42ubbleTypeNone\\x10\\x00\\x12\\x19\\n\\x15\\x42ubbleTypeClickButton\\x10\\x01\\x12\\x1c\\n\\x18\\x42ubbleTypeDmSettingPanel\\x10\\x02*X\\n\\x0c\\x43heckboxType\\x12\\x14\\n\\x10\\x43heckboxTypeNone\\x10\\x00\\x12\\x19\\n\\x15\\x43heckboxTypeEncourage\\x10\\x01\\x12\\x17\\n\\x13\\x43heckboxTypeColorDM\\x10\\x02*L\\n\\tDMAttrBit\\x12\\x14\\n\\x10\\x44MAttrBitProtect\\x10\\x00\\x12\\x15\\n\\x11\\x44MAttrBitFromLive\\x10\\x01\\x12\\x12\\n\\x0e\\x44MAttrHighLike\\x10\\x02*5\\n\\x0e\\x44mColorfulType\\x12\\x0c\\n\\x08NoneType\\x10\\x00\\x12\\x15\\n\\x0fVipGradualColor\\x10\\xe1\\xd4\\x03*<\\n\\x0c\\x45xposureType\\x12\\x14\\n\\x10\\x45xposureTypeNone\\x10\\x00\\x12\\x16\\n\\x12\\x45xposureTypeDMSend\\x10\\x01*\\xc1\\x01\\n\\x10PostPanelBizType\\x12\\x18\\n\\x14PostPanelBizTypeNone\\x10\\x00\\x12\\x1d\\n\\x19PostPanelBizTypeEncourage\\x10\\x01\\x12\\x1b\\n\\x17PostPanelBizTypeColorDM\\x10\\x02\\x12\\x19\\n\\x15PostPanelBizTypeNFTDM\\x10\\x03\\x12\\x1d\\n\\x19PostPanelBizTypeFragClose\\x10\\x04\\x12\\x1d\\n\\x19PostPanelBizTypeRecommend\\x10\\x05*8\\n\\nPostStatus\\x12\\x14\\n\\x10PostStatusNormal\\x10\\x00\\x12\\x14\\n\\x10PostStatusClosed\\x10\\x01*N\\n\\nRenderType\\x12\\x12\\n\\x0eRenderTypeNone\\x10\\x00\\x12\\x14\\n\\x10RenderTypeSingle\\x10\\x01\\x12\\x16\\n\\x12RenderTypeRotation\\x10\\x02*6\\n\\x10SubtitleAiStatus\\x12\\x08\\n\\x04None\\x10\\x00\\x12\\x0c\\n\\x08\\x45xposure\\x10\\x01\\x12\\n\\n\\x06\\x41ssist\\x10\\x02*+\\n\\x0eSubtitleAiType\\x12\\n\\n\\x06Normal\\x10\\x00\\x12\\r\\n\\tTranslate\\x10\\x01*\\x1e\\n\\x0cSubtitleType\\x12\\x06\\n\\x02\\x43\\x43\\x10\\x00\\x12\\x06\\n\\x02\\x41I\\x10\\x01*N\\n\\x11ToastFunctionType\\x12\\x19\\n\\x15ToastFunctionTypeNone\\x10\\x00\\x12\\x1e\\n\\x1aToastFunctionTypePostPanel\\x10\\x01\\x32\\xa0\\x05\\n\\x02\\x44M\\x12s\\n\\x0b\\x44mSegMobile\\x12\\x30.bilibili.community.service.dm.v1.DmSegMobileReq\\x1a\\x32.bilibili.community.service.dm.v1.DmSegMobileReply\\x12\\x64\\n\\x06\\x44mView\\x12+.bilibili.community.service.dm.v1.DmViewReq\\x1a-.bilibili.community.service.dm.v1.DmViewReply\\x12q\\n\\x0e\\x44mPlayerConfig\\x12\\x33.bilibili.community.service.dm.v1.DmPlayerConfigReq\\x1a*.bilibili.community.service.dm.v1.Response\\x12j\\n\\x08\\x44mSegOtt\\x12-.bilibili.community.service.dm.v1.DmSegOttReq\\x1a/.bilibili.community.service.dm.v1.DmSegOttReply\\x12j\\n\\x08\\x44mSegSDK\\x12-.bilibili.community.service.dm.v1.DmSegSDKReq\\x1a/.bilibili.community.service.dm.v1.DmSegSDKReply\\x12t\\n\\x0c\\x44mExpoReport\\x12\\x31.bilibili.community.service.dm.v1.DmExpoReportReq\\x1a\\x31.bilibili.community.service.dm.v1.DmExpoReportResb\\x06proto3')" }, { "identifier": "bili_utils", "path": "biliscrapy/network/bilibili_utils.py", "snippet": "class bili_utils:\n def __init__(self):\n self.logger = logging.getLogger('log')\n self.header = headers\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n\n def bv_get(self, bvorurl):\n # https://api.bilibili.com/x/web-interface/view?bvid=BV1uG41197Tf\n # 将bv提取出来\n bv_identifier = \"BV\" # BV号的标识符\n if \"http://\" in bvorurl or \"https://\" in bvorurl: # 检查是否是一个URL\n 
self.logger.info(\"你输入的是http链接,正在解析...\")\n bv_index = bvorurl.find(bv_identifier)\n if bv_index != -1: # 如果找到了BV号\n bv = bvorurl[bv_index:bv_index + len(bv_identifier) + 10] # 提取BV号\n self.logger.info(f\"BV号为......: {bv}\")\n return bv\n else:\n self.logger.info(\"你输入的链接地址有误!\")\n return\n elif bv_identifier in bvorurl: # 如果输入的是BV号\n self.logger.info(f\"你输入的是BV号{bvorurl},正在解析...\")\n bv = bvorurl\n return bv\n else:\n self.logger.info(f\"请输入正确的链接地址或BV号!,{bvorurl}\")\n return \"BV1111111111\"\n\n '''\n av 就是 oid 评论里面的参数\n '''\n\n def bv2av(self, bv):\n bv2av_url = 'https://api.bilibili.com/x/web-interface/view?bvid='\n if bv.startswith(\"BV\"):\n url = bv2av_url + str(bv)\n retry_count = 0\n max_retries = 10\n retry_delay = 1 # seconds\n while retry_count < max_retries:\n try:\n response = requests.get(url,headers=headers,cookies=self.cookies)\n response.raise_for_status() # 检查请求是否成功\n data = response.json()\n # self.logger.info(data)\n if 'data' in data and 'aid' in data['data']:\n avid = data['data']['aid']\n self.logger.info(f\"找到的avid{avid}\")\n return avid\n else:\n self.logger.info(\"未找到有效的aid值,正在重新尝试获取...\")\n retry_count += 1\n time.sleep(retry_delay)\n except (requests.RequestException, ValueError) as e:\n self.logger.info(f\"请求发生错误:{e}\")\n retry_count += 1\n self.logger.info(\"服务器返回错误!请稍后再试!\")\n self.logger.info(f\"正在重新尝试获取aid,尝试次数==>{retry_count}\")\n time.sleep(retry_delay)\n\n return None\n\n '''\n cid 是弹幕用的参数\n '''\n\n def bv2cid(self, bv):\n url = f\"https://api.bilibili.com/x/player/pagelist?bvid={str(bv)}&jsonp=jsonp\"\n retry_count = 1\n json_s = requests.get(url,headers=headers,cookies=self.cookies).json()\n self.logger.info(\"bv====》\"+bv)\n if json_s['code'] == 0:\n cid = json_s['data'][0]['cid']\n self.logger.info(\"提取出来的cid是:\" + str(cid))\n return cid\n else:\n self.logger.error(\"服务器返回错误!请稍后再试!\")\n retry_count+=1\n if retry_count > 10:\n self.logger.error(\"尝试次数过多,请稍后再试!\")\n return None\n else:\n self.logger.error(\"正在重新尝试获取cid,尝试次数==>\" + str(retry_count))\n return self.bv2cid(bv)\n\n def get_bilibili_cookies(self):\n options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n # 动态获取路径 不用每次都手动输入路径\n # chromedriver.exe 的路径\n # 获取当前脚本的绝对路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n\n # 构建 chromedriver 的绝对路径\n driver_path = os.path.join(current_path, 'chromedriver.exe')\n\n # 创建 WebDriver 服务\n service = Service(driver_path)\n # service = Service('./chromedriver.exe')\n options.add_argument('--no-sandbox')\n options.binary_location='C:\\\\Program Files\\\\Google\\\\chrome-win64\\\\chrome.exe'\n driver = webdriver.Chrome(options=options, service=service)\n\n # 打开 Bilibili 网站\n driver.get('https://www.bilibili.com/')\n #\n login_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#i_cecream > div.bili-feed4 > div.bili-header.large-header > div.bili-header__bar > ul.right-entry > li:nth-child(1) > li > div.right-entry__outside.go-login-btn')))\n login_btn.click()\n # 等待登录完成成\n time.sleep(10)\n driver.get('https://www.bilibili.com/')\n # 在这里,模拟登录流程(需要输入账号和密码)\n # 扫码登录然后,等待完成,完成的条件是屏幕上出现了某个\n\n search = WebDriverWait(driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#nav-searchform > div.nav-search-btn')))\n search.click()\n time.sleep(3)\n cookies = driver.get_cookies()\n # 获取当前脚本的路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(current_path, 'bilibili_cookies.json'), 'w') as f:\n # 写入当前文件\n 
f.write(json.dumps(cookies))\n # 写入成功\n self.logger.info('写入成功{}'.format(cookies))\n driver.quit()\n return\n\n def get_info_by_bv(self, bv):\n url = f\"https://api.bilibili.com/x/web-interface/view?bvid={str(bv)}\"\n\n def try_get(url):\n try:\n response = requests.get(url, headers=self.header, cookies=self.cookies)\n js_str = response.json()\n if js_str.get('code', 0) == 0:\n return js_str['data']\n else:\n # 可能需要根据API的设计,记录不同的错误\n self.logger.error(\n f\"Video API returned non-success code: {js_str.get('code', 'Unknown')} with message: {js_str.get('msg', 'Unknown')}\")\n except requests.exceptions.RequestException as e:\n self.logger.error(f\"An error occurred: {e}\")\n return None\n\n result = None\n retry_count = 10\n for _ in range(retry_count):\n result = try_get(url)\n if result:\n break\n\n return result\n\n # 检查url是否合法\n def check_url(self, url):\n if url.startswith(\"BV\"):\n return True\n elif url.startswith(\"https://www.bilibili.com/\"):\n return True\n else:\n return False" } ]
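A minimal usage sketch of the bili_utils helper shown in the context above. The absolute import path biliscrapy.network.bilibili_utils is an assumption (the record itself imports it relatively), and it presumes a valid bilibili_cookies.json sits next to the module:

from biliscrapy.network.bilibili_utils import bili_utils  # assumed absolute import path

utils = bili_utils()
bv = utils.bv_get("https://www.bilibili.com/video/BV1uG41197Tf")  # slices out the 12-char BV id
aid = utils.bv2av(bv)   # aid/oid used by the comment API
cid = utils.bv2cid(bv)  # cid/oid used by the danmaku API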
import_statement:
import logging
import os
import json
import sys
import requests
from datetime import datetime
from .protobuf import bili_pb2 as Danmaku
from .bilibili_utils import bili_utils

token_num: 12,925
headers = { 'authority': 'message.bilibili.com', 'accept': 'application/json, text/plain, */*', 'accept-language': 'zh-CN,zh;q=0.9', 'cache-control': 'no-cache', 'origin': 'https://www.bilibili.com', 'pragma': 'no-cache', 'referer': 'https://www.bilibili.com/', 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-site', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', } # import bili_pb2 as Danmaku class Danmu: def __init__(self): self.utils = bili_utils() self.script_dir = os.path.dirname(os.path.abspath(__file__)) # 构建文件路径 file_path = os.path.join(self.script_dir, 'bilibili_cookies.json') if not file_path: self.cookies = {} with open(file_path, 'r') as file: self.cookies_data = json.load(file) self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data} self.headers = headers self.logger = logging.getLogger('log') def bv2cid(self, bvorurl): try: bv = self.utils.bv_get(bvorurl) cid = self.utils.bv2cid(bv) return cid except Exception as e: self.logger.error(e) return None # 获取某个 oid 下存在弹幕的日期列表 def get_available_dates(self, oid, year=None, month=None): if not year or not month: now = datetime.now() year = now.year month = now.month # 如果month 是1.2.3.4.5.6.7.8.9 前面补0 if month < 10: month = '0' + str(month) url = f'https://api.bilibili.com/x/v2/dm/history/index?type=1&oid={oid}&month={year}-{month}' response = requests.get(url, cookies=self.cookies, headers=self.headers) if response.status_code == 200: data = response.json() return data.get("data", []) else: self.logger.error("请检查你输入的 oid 号码!!") self.logger.error(f"当前请求的 URL 为: {url}") return [] ''' 下载某个视频的弹幕文件 ''' def down_so_files(self, oid, dates): if dates == None: return if oid == None: self.logger.info("请输入正确的 oid 号码!!") return if not os.path.exists(os.path.join(self.script_dir, 'data/danmaku')): os.mkdir(os.path.join(self.script_dir, 'data/danmaku')) elif dates: url = f'https://api.bilibili.com/x/v2/dm/web/history/seg.so?type=1&oid={oid}' for date in dates: url_ = f'{url}&date={date}' self.logger.info(f"正在下载 {oid}-{date}.so 文件,请稍后...") response = requests.get(url_, cookies=self.cookies, headers=self.headers) if response.status_code == 200: with open(os.path.join(self.script_dir, 'data/danmaku/', f'{oid}-{date}.so'), 'wb') as f: f.write(response.content) else: self.logger.info("请检查你输入的 oid 号码!!") self.logger.info(f"当前请求的 URL 为: {url}") return self.logger.info(f"下载完成!") # 将.so文件解析并保存为JSON文件 def parse_so_to_json(self, oid, dates): try: if dates == None: self.logger.error("日期为空") return all_danmaku = set() # 用集合存储所有弹幕数据 for date in dates: file_path = os.path.join(self.script_dir, 'data/danmaku/', f'{oid}-{date}.so') with open(file_path, 'rb') as f: data = f.read()
headers = { 'authority': 'message.bilibili.com', 'accept': 'application/json, text/plain, */*', 'accept-language': 'zh-CN,zh;q=0.9', 'cache-control': 'no-cache', 'origin': 'https://www.bilibili.com', 'pragma': 'no-cache', 'referer': 'https://www.bilibili.com/', 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-site', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', } # import bili_pb2 as Danmaku class Danmu: def __init__(self): self.utils = bili_utils() self.script_dir = os.path.dirname(os.path.abspath(__file__)) # 构建文件路径 file_path = os.path.join(self.script_dir, 'bilibili_cookies.json') if not file_path: self.cookies = {} with open(file_path, 'r') as file: self.cookies_data = json.load(file) self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data} self.headers = headers self.logger = logging.getLogger('log') def bv2cid(self, bvorurl): try: bv = self.utils.bv_get(bvorurl) cid = self.utils.bv2cid(bv) return cid except Exception as e: self.logger.error(e) return None # 获取某个 oid 下存在弹幕的日期列表 def get_available_dates(self, oid, year=None, month=None): if not year or not month: now = datetime.now() year = now.year month = now.month # 如果month 是1.2.3.4.5.6.7.8.9 前面补0 if month < 10: month = '0' + str(month) url = f'https://api.bilibili.com/x/v2/dm/history/index?type=1&oid={oid}&month={year}-{month}' response = requests.get(url, cookies=self.cookies, headers=self.headers) if response.status_code == 200: data = response.json() return data.get("data", []) else: self.logger.error("请检查你输入的 oid 号码!!") self.logger.error(f"当前请求的 URL 为: {url}") return [] ''' 下载某个视频的弹幕文件 ''' def down_so_files(self, oid, dates): if dates == None: return if oid == None: self.logger.info("请输入正确的 oid 号码!!") return if not os.path.exists(os.path.join(self.script_dir, 'data/danmaku')): os.mkdir(os.path.join(self.script_dir, 'data/danmaku')) elif dates: url = f'https://api.bilibili.com/x/v2/dm/web/history/seg.so?type=1&oid={oid}' for date in dates: url_ = f'{url}&date={date}' self.logger.info(f"正在下载 {oid}-{date}.so 文件,请稍后...") response = requests.get(url_, cookies=self.cookies, headers=self.headers) if response.status_code == 200: with open(os.path.join(self.script_dir, 'data/danmaku/', f'{oid}-{date}.so'), 'wb') as f: f.write(response.content) else: self.logger.info("请检查你输入的 oid 号码!!") self.logger.info(f"当前请求的 URL 为: {url}") return self.logger.info(f"下载完成!") # 将.so文件解析并保存为JSON文件 def parse_so_to_json(self, oid, dates): try: if dates == None: self.logger.error("日期为空") return all_danmaku = set() # 用集合存储所有弹幕数据 for date in dates: file_path = os.path.join(self.script_dir, 'data/danmaku/', f'{oid}-{date}.so') with open(file_path, 'rb') as f: data = f.read()
next_line: my_seg = Danmaku.DmSegMobileReply()
gold_snippet_index: 0
created_at: 2023-12-14 10:14:24+00:00
level: 16k
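For context, a hedged sketch of how the parsing step begun by next_line would typically continue. It assumes the generated DmSegMobileReply message exposes a repeated elems field of DanmakuElem entries (with progress/content fields), as in the public bilibili danmaku proto; it is not taken verbatim from the repository:

my_seg = Danmaku.DmSegMobileReply()
my_seg.ParseFromString(data)  # `data` holds the raw bytes read from the downloaded .so segment
for elem in my_seg.elems:     # assumed field: repeated DanmakuElem elems
    all_danmaku.add((elem.progress, elem.content))  # e.g. collect (time in ms, text) pairs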
repo_name: Angryrou/udao
file_path: udao/optimization/tests/moo/test_weighted_sum.py
[ { "identifier": "logger", "path": "udao/utils/logging.py", "snippet": "def _get_logger(name: str = \"udao\", level: int = logging.DEBUG) -> logging.Logger:" }, { "identifier": "Constraint", "path": "udao/optimization/concepts/constraint.py", "snippet": "class Constraint:\n \"\"\"An optimization element is either an objective or a constraint.\n\n The choice of the type depends on whether a DataProcessor is specified\n for the problem:\n - if no DataProcessor is provided: UdaoFunction, it is a callable\n that takes input_variables and input_parameters\n - else, th.nn.Module or other Callable returning a tensor.\n\n Parameters\n ----------\n function : Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]]\n Objective function, either a UdaoFunction\n or a th.nn.Module if a DataProcessor is provided\n lower : Optional[float], optional\n lower bound of the element, by default None\n upper : Optional[float], optional\n upper bound of the element, by default None\n \"\"\"\n\n def __init__(\n self,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n ) -> None:\n if isinstance(function, th.nn.Module):\n function.eval()\n for p in function.parameters():\n p.requires_grad = False\n self.function = function\n self.lower = lower\n self.upper = upper\n\n def __call__(self, *args: Any, **kwargs: Any) -> th.Tensor:\n return self.function(*args, **kwargs)\n\n def to(self, device: Optional[th.device]) -> \"Constraint\":\n if isinstance(self.function, th.nn.Module) and device is not None:\n self.function.to(device)\n return self\n\n def __repr__(self) -> str:\n return f\"Constraint(lower={self.lower}, upper={self.upper})\"" }, { "identifier": "Objective", "path": "udao/optimization/concepts/objective.py", "snippet": "class Objective(Constraint):\n \"\"\"\n\n Parameters\n ----------\n name : str\n Name of the objective.\n minimize : bool\n Direction of the objective: if True, minimize, else maximize.\n type: VarTypes\n Type of the objective, by default VarTypes.FLOAT\n \"\"\"\n\n def __init__(\n self,\n name: str,\n minimize: bool,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n type: VarTypes = VarTypes.FLOAT,\n ):\n super().__init__(function=function, lower=lower, upper=upper)\n self.name = name\n self.minimize = minimize\n self.type = type\n\n @property\n def direction(self) -> int:\n \"\"\"Get gradient direction from optimization type\"\"\"\n if self.minimize:\n return 1\n else:\n return -1\n\n def __repr__(self) -> str:\n return (\n f\"Objective(name={self.name}, \"\n f\"direction={'min' if self.minimize else 'max'}, \"\n f\"lower={self.lower}, upper={self.upper})\"\n )" }, { "identifier": "MOProblem", "path": "udao/optimization/concepts/problem.py", "snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n 
f\"input_parameters={self.input_parameters})\"\n )" }, { "identifier": "WeightedSum", "path": "udao/optimization/moo/weighted_sum.py", "snippet": "class WeightedSum(MOSolver):\n \"\"\"\n Weighted Sum (WS) algorithm for MOO\n\n Parameters\n ----------\n ws_pairs: np.ndarray,\n weight settings for all objectives, of shape (n_weights, n_objs)\n inner_solver: BaseSolver,\n the solver used in Weighted Sum\n objectives: List[Objective],\n objective functions\n constraints: List[Constraint],\n constraint functions\n\n \"\"\"\n\n @dataclass\n class Params:\n ws_pairs: np.ndarray\n \"\"\"weight sets for all objectives, of shape (n_weights, n_objs)\"\"\"\n so_solver: SOSolver\n \"\"\"solver for SOO\"\"\"\n normalize: bool = True\n \"\"\"whether to normalize objective values to [0, 1] before applying WS\"\"\"\n allow_cache: bool = False\n \"\"\"whether to cache the objective values\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n\n def __init__(\n self,\n params: Params,\n ):\n super().__init__()\n self.so_solver = params.so_solver\n self.ws_pairs = params.ws_pairs\n self.allow_cache = params.allow_cache\n self.normalize = params.normalize\n self.device = params.device\n\n if self.allow_cache and isinstance(params.so_solver, MOGD):\n raise NotImplementedError(\n \"MOGD does not support caching.\" \"Please set allow_cache=False.\"\n )\n\n def solve(\n self, problem: MOProblem, seed: Optional[int] = None\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"solve MOO problem by Weighted Sum (WS)\n\n Parameters\n ----------\n variables : List[Variable]\n List of the variables to be optimized.\n input_parameters : Optional[Dict[str, Any]]\n Fixed input parameters expected by\n the objective functions.\n\n Returns\n -------\n Tuple[Optional[np.ndarray],Optional[np.ndarray]]\n Pareto solutions and corresponding variables.\n \"\"\"\n candidate_points: List[Point] = []\n objective = WeightedSumObjective(\n problem, self.ws_pairs[0], self.allow_cache, self.normalize, self.device\n )\n so_problem = problem.derive_SO_problem(objective)\n for i, ws in enumerate(self.ws_pairs):\n objective.ws = ws\n _, soo_vars = self.so_solver.solve(\n so_problem,\n seed=seed + i * (not self.allow_cache) if seed is not None else None,\n )\n\n objective_values = np.array(\n [\n problem.apply_function(obj, soo_vars, device=self.device)\n .cpu()\n .numpy()\n for obj in problem.objectives\n ]\n ).T.squeeze()\n\n candidate_points.append(Point(objective_values, soo_vars))\n\n return moo_ut.summarize_ret(\n [point.objs for point in candidate_points],\n [point.vars for point in candidate_points],\n )" }, { "identifier": "GridSearchSolver", "path": "udao/optimization/soo/grid_search_solver.py", "snippet": "class GridSearchSolver(SamplerSolver):\n \"\"\"Solving a SOO problem by grid search over variables\"\"\"\n\n @dataclass\n class Params:\n n_grids_per_var: List[int]\n \"\"\"List of grid sizes for each variable\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n\n def __init__(self, params: Params) -> None:\n \"\"\"\n :param gs_params: dict, the parameters used in grid_search\n \"\"\"\n super().__init__(params.device)\n self.n_grids_per_var = params.n_grids_per_var\n\n def _process_variable(self, var: Variable, n_grids: int) -> np.ndarray:\n \"\"\"Define grid point in fonction of the variable type\"\"\"\n if 
isinstance(var, NumericVariable):\n # make sure the grid point is the same with the type\n # e.g., if int x.min=0, x.max=5, n_grids_per_var=10,\n # ONLY points[0, 1, 2, 3, 4, 5] are feasible\n if isinstance(var, IntegerVariable):\n if n_grids > (var.upper - var.lower + 1):\n n_grids = int(var.upper - var.lower + 1)\n\n var_grid = np.linspace(var.lower, var.upper, num=n_grids, endpoint=True)\n if isinstance(var, IntegerVariable):\n return np.round(var_grid).astype(int)\n return var_grid\n elif isinstance(var, EnumVariable):\n return np.array(var.values)\n else:\n raise NotImplementedError(\n f\"ERROR: variable type {type(var)} is not supported!\"\n )\n\n def _get_input(\n self, variables: Mapping[str, Variable], seed: Optional[int] = None\n ) -> Dict[str, np.ndarray]:\n \"\"\"\n Generate grids for each variable\n\n Parameters\n ----------\n variables: Mapping[str, Variable]\n variables to generate\n\n Returns\n -------\n Dict[str, np.ndarray]\n Dict with array of values for each variable\n \"\"\"\n grids_list = []\n variable_names = list(variables.keys())\n\n for i, var_name in enumerate(variable_names):\n var = variables[var_name]\n var_n_grids = self.n_grids_per_var[i]\n grids_list.append({var_name: self._process_variable(var, var_n_grids)})\n\n values_list = [list(d.values())[0] for d in grids_list]\n cartesian_product = np.array([list(i) for i in itertools.product(*values_list)])\n result_dict = {\n var_name: cartesian_product.T[i]\n for i, var_name in enumerate(variable_names)\n }\n\n return result_dict" }, { "identifier": "MOGD", "path": "udao/optimization/soo/mogd.py", "snippet": "class MOGD(SOSolver):\n \"\"\"MOGD solver for single-objective optimization.\n\n Performs gradient descent on input variables by minimizing an\n objective loss and a constraint loss.\n \"\"\"\n\n @dataclass\n class Params:\n learning_rate: float\n \"\"\"learning rate of Adam optimizer applied to input variables\"\"\"\n max_iters: int\n \"\"\"maximum number of iterations for a single local search\"\"\"\n patience: int\n \"\"\"maximum number of iterations without improvement\"\"\"\n multistart: int\n \"\"\"number of random starts for gradient descent\"\"\"\n objective_stress: float = 10.0\n \"\"\"stress term for objective functions\"\"\"\n constraint_stress: float = 1e5\n \"\"\"stress term for constraint functions\"\"\"\n strict_rounding: bool = False\n \"\"\"whether strictly rounding integer variables at each iteration. 
\"\"\"\n batch_size: int = 1\n \"\"\"batch size for gradient descent\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n dtype: th.dtype = th.float32\n \"\"\"type of the tensors\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__()\n self.lr = params.learning_rate\n self.max_iter = params.max_iters\n self.patience = params.patience\n self.multistart = params.multistart\n self.objective_stress = params.objective_stress\n self.constraint_stress = params.constraint_stress\n self.strict_rounding = params.strict_rounding\n self.batch_size = params.batch_size\n self.device = params.device\n self.dtype = params.dtype\n\n def _get_unprocessed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables for which to get random values\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[Dict[str, th.Tensor], Dict[str, Any]]\n - random values as a tensor for each numeric variable\n - input parameters valuies\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n return derive_unprocessed_input(\n input_variables=numeric_values,\n input_parameters=input_parameters,\n device=self.device,\n )\n\n def _get_processed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[UdaoInput, UdaoItemShape, Callable[[th.Tensor], TabularContainer]]:\n \"\"\"Get random values for numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInputShape, Callable[[th.Tensor], TabularContainer]]\n - random values for numeric variables\n - shape of the input\n - function to convert a tensor to a TabularContainer\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n input_data, iterator = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n make_tabular_container = cast(\n UdaoIterator, iterator\n ).get_tabular_features_container\n\n input_data_shape = iterator.shape\n\n return (\n input_data,\n input_data_shape,\n make_tabular_container,\n )\n\n def _get_unprocessed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n ) -> Tuple[Dict[str, float], Dict[str, float]]:\n \"\"\"\n\n Parameters\n 
----------\n numeric_variables : Dict[str, co.NumericVariable]\n Variables for which to get bounds\n\n Returns\n -------\n Tuple[Dict[str, float], Dict[str, float]]\n - lower bounds of numeric variables\n - upper bounds of numeric variables\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n return lower_numeric_values, upper_numeric_values\n\n def _get_processed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> Tuple[UdaoInput, UdaoInput]:\n \"\"\"Get bounds of numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Input parameters, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInput]\n Lower and upper bounds of numeric\n variables in the form of a UdaoInput\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n lower_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=lower_numeric_values,\n )\n upper_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=upper_numeric_values,\n )\n if self.device:\n return lower_input.to(self.device), upper_input.to(self.device)\n else:\n return lower_input, upper_input\n\n def _gradient_descent(\n self,\n problem: co.SOProblem,\n input_data: Union[UdaoInput, Dict],\n optimizer: th.optim.Optimizer,\n ) -> Tuple[int, float, float]:\n \"\"\"Perform a gradient descent step on input variables\n\n Parameters\n ----------\n problem : co.SOProblem\n Single-objective optimization problem\n input_data : Union[UdaoInput, Dict]\n Input data - can have different types depending on whether\n the input variables are processed or not.\n - UdaoInput: the naive input\n - Dict: {\"input_variables\": ..., \"input_parameters\": ...}\n\n optimizer : th.optim.Optimizer\n PyTorch optimizer\n\n Returns\n -------\n Tuple[int, float, float]\n - index of minimum loss\n - minimum loss\n - objective value at minimum loss\n\n Raises\n ------\n UncompliantSolutionError\n If no solution within bounds is found\n \"\"\"\n # Compute objective, constraints and corresponding losses\n\n loss_meta = self._compute_loss(problem, input_data)\n sum_loss = loss_meta[\"sum_loss\"]\n min_loss = loss_meta[\"min_loss\"]\n min_loss_id = loss_meta[\"min_loss_id\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n\n optimizer.zero_grad()\n sum_loss.backward() # type: ignore\n optimizer.step()\n\n if is_within_constraint and (\n self.within_objective_bounds(best_obj, problem.objective)\n ):\n return min_loss_id, min_loss, best_obj\n else:\n raise UncompliantSolutionError(\"No solution within bounds found!\")\n\n def _log_success(\n self,\n problem: co.SOProblem,\n iter: int,\n best_obj: float,\n best_iter: int,\n best_feature_input: Any,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, best local {problem.objective.name} \"\n f\"found 
{best_obj:.5f}\"\n f\" \\nat iteration {best_iter},\"\n f\" \\nwith vars: {best_feature_input}, for \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _log_failure(\n self,\n problem: co.SOProblem,\n iter: int,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, no valid {problem.objective.name}\"\n f\" found for input parameters {problem.input_parameters} with \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _unprocessed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n no data processor is defined.\n The input variables are transformed to a dictionary of tensors and are\n optimized directly, by being passed to the objective function along\n with the input parameters.\n \"\"\"\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[Dict[str, th.Tensor]] = None\n\n (\n input_variable_values,\n input_parameter_values,\n ) = self._get_unprocessed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n lower_input, upper_input = self._get_unprocessed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables)\n )\n for name in input_variable_values:\n input_variable_values[name].requires_grad_(True)\n optimizer = optim.Adam([t for t in input_variable_values.values()], lr=self.lr)\n i = 0\n while i < self.max_iter:\n with th.no_grad():\n input_variable_values_backup = {\n k: v.detach().clone() for k, v in input_variable_values.items()\n }\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n {\n \"input_variables\": input_variable_values,\n \"input_parameters\": input_parameter_values,\n },\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = {\n k: v[min_loss_id].reshape(1, -1)\n for k, v in input_variable_values_backup.items()\n }\n best_iter = i\n\n with th.no_grad():\n # Update input_variable_values with constrained values\n for k in input_variable_values:\n input_variable_values[k].data = th.clip(\n input_variable_values[k].data,\n lower_input[k],\n upper_input[k],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n for k in input_variable_values:\n if isinstance(problem.variables[k], co.IntegerVariable):\n input_variable_values[k].data = input_variable_values[\n k\n ].data.round()\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n if not self.strict_rounding:\n for k in best_feature_input:\n if isinstance(problem.variables[k], co.IntegerVariable):\n best_feature_input[k].data = best_feature_input[k].data.round()\n loss_meta = self._compute_loss(\n problem,\n {\n \"input_variables\": best_feature_input,\n \"input_parameters\": input_parameter_values,\n },\n )\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise 
NoSolutionError\n\n best_raw_vars = {\n name: best_feature_input[name]\n .cpu()\n .numpy()\n .squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _processed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n a data processor is defined.\n\n input variables and parameters are processed by the data processor.\n Gradient descent is performed on the processed input variables.\n Variables are then inverse transformed to get the raw variables.\n \"\"\"\n if not problem.data_processor:\n raise Exception(\"Data processor is not defined!\")\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[th.Tensor] = None\n # Random numeric variables and their characteristics\n (\n input_data,\n input_data_shape,\n make_tabular_container,\n ) = self._get_processed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n # Bounds of numeric variables\n lower_input, upper_input = self._get_processed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n )\n # Indices of numeric variables on which to apply gradients\n mask = th.tensor(\n [i in problem.variables for i in input_data_shape.feature_names],\n device=self.device,\n )\n grad_indices = th.nonzero(mask, as_tuple=False).squeeze()\n input_vars_subvector = input_data.features[:, grad_indices].clone().detach()\n input_vars_subvector.requires_grad_(True)\n\n optimizer = optim.Adam([input_vars_subvector], lr=self.lr)\n i = 0\n while i < self.max_iter:\n input_data.features = input_data.features.clone().detach()\n input_data.features[:, grad_indices] = input_vars_subvector\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n input_data,\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = (\n input_data.features.detach()[min_loss_id].clone().reshape(1, -1)\n )\n best_iter = i\n\n with th.no_grad():\n # Update input_vars_subvector with constrained values\n input_vars_subvector.data = th.clip(\n input_vars_subvector.data,\n # Use .data to avoid gradient tracking during update\n lower_input.features[0, grad_indices],\n upper_input.features[0, grad_indices],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n input_data.features[:, grad_indices] = input_vars_subvector.data\n feature_container = make_tabular_container(\n input_data.features.detach()\n )\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n numeric_values: Dict[str, np.ndarray] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n 
input_vars_subvector.data = input_data_raw.features[:, grad_indices]\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n with th.no_grad():\n best_feature_input = cast(th.Tensor, best_feature_input)\n feature_container = make_tabular_container(best_feature_input)\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n if not self.strict_rounding:\n best_raw_vars: Dict[str, Any] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_best_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=best_raw_vars,\n device=self.device,\n )\n loss_meta = self._compute_loss(problem, input_data_best_raw)\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n else:\n best_raw_vars = {\n name: best_raw_df[[name]]\n .values.squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization.\n Categorical variables are fixed to the values in input_parameters.\n (a grid search of categorical variables is performed in solve)\n This is where gradient descent is performed.\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n objective : co.Objective\n Objective to be optimized\n constraints : Sequence[co.Constraint]\n Constraints to be satisfied\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed: int, by default None\n random seed\n\n Returns\n -------\n Tuple[float, Dict[str, float], flat]\n - objective value\n - variables\n - best loss value\n\n Raises\n ------\n NoSolutionError\n No valid solution is found\n \"\"\"\n\n if not problem.data_processor:\n return self._unprocessed_single_start_opt(problem, seed=seed)\n else:\n return self._processed_single_start_opt(problem, seed=seed)\n\n def solve(\n self, problem: co.SOProblem, seed: Optional[int] = None\n ) -> Tuple[float, Dict[str, float]]:\n if seed is not None:\n th.manual_seed(seed)\n if self.device:\n for constraint in problem.constraints:\n constraint.to(self.device)\n problem.objective.to(self.device)\n\n categorical_variables = [\n name\n for name, variable in problem.variables.items()\n if isinstance(variable, co.EnumVariable)\n ]\n numeric_variables = {\n name: variable\n for name, variable in problem.variables.items()\n if isinstance(variable, co.NumericVariable)\n }\n\n meshed_categorical_vars = self.get_meshed_categorical_vars(problem.variables)\n\n if meshed_categorical_vars is None:\n meshed_categorical_vars = np.array([0])\n\n best_loss_list: List[float] = []\n obj_list: List[float] = []\n 
vars_list: List[Dict] = []\n for i in range(self.multistart):\n for categorical_cell in meshed_categorical_vars:\n categorical_values = {\n name: categorical_cell[ind]\n for ind, name in enumerate(categorical_variables)\n } # from {id: value} to {name: value}\n fixed_values = {\n **categorical_values,\n **(problem.input_parameters or {}),\n }\n try:\n (\n obj_pred,\n best_raw_vars,\n best_loss,\n ) = self._single_start_opt(\n co.SOProblem(\n variables=numeric_variables, # type: ignore\n input_parameters=fixed_values,\n objective=problem.objective,\n constraints=problem.constraints or [],\n data_processor=problem.data_processor,\n ),\n seed=seed + i if seed is not None else None,\n )\n except NoSolutionError:\n continue\n else:\n best_loss_list.append(best_loss)\n obj_list.append(obj_pred)\n vars_list.append(best_raw_vars)\n if not obj_list:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n idx = np.argmin(best_loss_list)\n vars_cand = vars_list[idx]\n if vars_cand is not None:\n obj_cand = obj_list[idx]\n if obj_cand is None:\n raise Exception(f\"Unexpected objs_list[{idx}] is None.\")\n else:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n return obj_cand, vars_cand\n\n ##################\n ## _loss ##\n ##################\n def constraints_loss(\n self, constraint_values: List[th.Tensor], constraints: Sequence[co.Constraint]\n ) -> th.Tensor:\n \"\"\"\n compute loss of the values of each constraint function fixme: double-check\n\n Parameters\n ----------\n constraint_values : List[th.Tensor]\n values of each constraint function\n constraints : Sequence[co.Constraint]\n constraint functions\n\n Returns\n -------\n th.Tensor\n loss of the values of each constraint function\n\n \"\"\"\n\n # vars: a tensor\n # get loss for constraint functions defined in the problem setting\n total_loss = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n for i, (constraint_value, constraint) in enumerate(\n zip(constraint_values, constraints)\n ):\n stress = (\n self.objective_stress\n if isinstance(constraint, co.Objective)\n else self.constraint_stress\n )\n constraint_violation = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n if constraint.upper is not None and constraint.lower is not None:\n if constraint.upper == constraint.lower:\n constraint_violation = th.abs(constraint_value - constraint.upper)\n else:\n normed_constraint = (constraint_value - constraint.lower) / (\n constraint.upper - constraint.lower\n )\n constraint_violation = th.where(\n (normed_constraint < 0) | (normed_constraint > 1),\n (normed_constraint - 0.5),\n 0,\n )\n elif constraint.lower is not None:\n constraint_violation = th.relu(constraint.lower - constraint_value)\n elif constraint.upper is not None:\n constraint_violation = th.relu(constraint_value - constraint.upper)\n total_loss += (\n constraint_violation**2 + stress * (constraint_violation > 0).float()\n )\n\n return total_loss\n\n def objective_loss(\n self, objective_value: th.Tensor, objective: co.Objective\n ) -> th.Tensor:\n \"\"\"Compute the objective loss for a given objective value:\n - if no bounds are specified, use the squared objective value\n - if both bounds are specified, use the squared normalized\n objective value if it is within the bounds, otherwise\n add a stress term to a squared distance to middle of the bounds\n\n Parameters\n ----------\n objective_value : th.Tensor\n Tensor of objective values\n objective : co.Objective\n Objective 
function\n\n Returns\n -------\n th.Tensor\n Tensor of objective losses\n\n Raises\n ------\n NotImplementedError\n If only one bound is specified for the objective\n\n \"\"\"\n\n if objective.upper is None and objective.lower is None:\n loss = (\n th.sign(objective_value) * (objective_value**2) * objective.direction\n )\n elif objective.upper is not None and objective.lower is not None:\n norm_cst_obj_pred = (objective_value - objective.lower) / (\n objective.upper - objective.lower\n ) # scaled\n loss = th.where(\n (norm_cst_obj_pred < 0) | (norm_cst_obj_pred > 1),\n (norm_cst_obj_pred - 0.5) ** 2 + self.objective_stress,\n norm_cst_obj_pred * objective.direction,\n )\n else:\n raise NotImplementedError(\"Objective with only one bound is not supported\")\n return loss\n\n def _obj_forward(\n self,\n optimization_element: co.Constraint,\n input_data: Union[UdaoInput, Dict],\n ) -> th.Tensor:\n if isinstance(input_data, UdaoInput):\n return optimization_element.function(input_data) # type: ignore\n else:\n # Dict when unprocessed inputs\n return optimization_element.function(**input_data)\n\n def _compute_loss(\n self, problem: co.SOProblem, input_data: Union[UdaoInput, Dict]\n ) -> Dict[str, Any]:\n obj_output = self._obj_forward(problem.objective, input_data)\n objective_loss = self.objective_loss(obj_output, problem.objective)\n constraint_loss = th.zeros_like(objective_loss, device=self.device)\n\n if problem.constraints:\n const_outputs = [\n self._obj_forward(constraint, input_data)\n for constraint in problem.constraints\n ]\n constraint_loss = self.constraints_loss(const_outputs, problem.constraints)\n\n loss = objective_loss + constraint_loss\n min_loss_id = int(th.argmin(loss).cpu().item())\n\n return {\n \"sum_loss\": th.sum(loss),\n \"min_loss\": th.min(loss).cpu().item(),\n \"min_loss_id\": min_loss_id,\n \"best_obj\": obj_output[min_loss_id].cpu().item(),\n \"is_within_constraint\": bool((constraint_loss[min_loss_id] == 0).item()),\n }\n\n ##################\n ## _get (vars) ##\n ##################\n\n def get_meshed_categorical_vars(\n self, variables: Dict[str, co.Variable]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Get combinations of all categorical (binary, enum) variables\n\n Parameters\n ----------\n variables : Dict[str, co.Variable]\n Variables to be optimized\n\n Returns\n -------\n Optional[np.ndarray]\n Combinations of all categorical variables\n of shape (n_samples, n_vars)\n \"\"\"\n cv_value_list = [\n variable.values\n for variable in variables.values()\n if isinstance(variable, co.EnumVariable)\n ]\n if not cv_value_list:\n return None\n meshed_cv_value_list = [x_.reshape(-1, 1) for x_ in np.meshgrid(*cv_value_list)]\n meshed_cv_value = np.concatenate(meshed_cv_value_list, axis=1)\n return meshed_cv_value\n\n ##################\n ## _check ##\n ##################\n\n @staticmethod\n def within_objective_bounds(obj_value: float, objective: co.Objective) -> bool:\n \"\"\"\n check whether violating the objective value var_ranges\n :param pred_dict: dict, keys are objective names,\n values are objective values\n :param obj_bounds: dict, keys are objective names,\n values are lower and upper var_ranges of each objective value\n :return: True or False\n \"\"\"\n within_bounds = True\n if objective.upper is not None:\n within_bounds = obj_value <= objective.upper\n if objective.lower is not None:\n within_bounds = within_bounds and obj_value >= objective.lower\n return within_bounds" }, { "identifier": "RandomSamplerSolver", "path": 
"udao/optimization/soo/random_sampler_solver.py", "snippet": "class RandomSamplerSolver(SamplerSolver):\n \"\"\"Solving a SOO problem by random sampling over variables\"\"\"\n\n @dataclass\n class Params:\n n_samples_per_param: int\n \"the number of samples per variable\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__(params.device)\n self.n_samples_per_param = params.n_samples_per_param\n\n def _process_variable(\n self, var: Variable, seed: Optional[int] = None\n ) -> np.ndarray:\n if seed is not None:\n np.random.seed(seed)\n \"\"\"Generate samples of a variable\"\"\"\n if isinstance(var, FloatVariable):\n return np.random.uniform(var.lower, var.upper, self.n_samples_per_param)\n elif isinstance(var, IntegerVariable):\n return np.random.randint(\n var.lower, var.upper + 1, size=self.n_samples_per_param\n )\n elif isinstance(var, EnumVariable):\n inds = np.random.randint(0, len(var.values), size=self.n_samples_per_param)\n return np.array(var.values)[inds]\n else:\n raise NotImplementedError(\n f\"ERROR: variable type {type(var)} is not supported!\"\n )\n\n def _get_input(\n self, variables: Mapping[str, Variable], seed: Optional[int] = None\n ) -> Dict[str, np.ndarray]:\n \"\"\"\n generate samples of variables\n\n Parameters:\n -----------\n variables: List[Variable],\n lower and upper var_ranges of variables(non-ENUM),\n and values of ENUM variables\n Returns:\n --------\n Dict[str, np.ndarray]\n Dict with array of values for each variable\n \"\"\"\n result_dict = {}\n\n for name, var in variables.items():\n result_dict[name] = self._process_variable(var, seed=seed)\n\n return result_dict" }, { "identifier": "SOSolver", "path": "udao/optimization/soo/so_solver.py", "snippet": "class SOSolver(ABC):\n @abstractmethod\n def solve(\n self,\n problem: SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float]]:\n \"\"\"Solve a single-objective optimization problem\n\n Parameters\n ----------\n problem : SOProblem\n Single-objective optimization problem to solve\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[float, Dict[str, float]]\n A tuple of the objective value and the variables\n that optimize the objective\n \"\"\"\n ..." }, { "identifier": "NoSolutionError", "path": "udao/optimization/utils/exceptions.py", "snippet": "class NoSolutionError(ValueError):\n \"Raised when no solution is found for an MOO problem\"\n ..." 
}, { "identifier": "even_weights", "path": "udao/optimization/utils/moo_utils.py", "snippet": "def even_weights(stepsize: float, n_objectives: int) -> np.ndarray:\n \"\"\"Generate even weights for 2d and 3D\n\n Parameters\n ----------\n stepsize : float\n Step size for the weights\n n_objectives : int\n Number of objectives for which to generate weights\n Only 2 and 3 are supported\n\n Returns\n -------\n np.ndarray\n Array of weights of shape (n_weights, n_objectives)\n\n Raises\n ------\n Exception\n If `n_objectives` is not 2 or 3\n \"\"\"\n ws_pairs = np.array([])\n if n_objectives == 2:\n w1 = np.hstack([np.arange(0, 1, stepsize), 1])\n w2 = 1 - w1\n ws_pairs = np.array([[w1, w2] for w1, w2 in zip(w1, w2)])\n\n elif n_objectives == 3:\n w_steps = np.linspace(0, 1, num=int(1 / stepsize) + 1, endpoint=True)\n for i, w in enumerate(w_steps):\n # use round to avoid case of floating point limitations in Python\n # the limitation: 1- 0.9 = 0.09999999999998 rather than 0.1\n other_ws_range = round((1 - w), 10)\n w2 = np.linspace(\n 0,\n other_ws_range,\n num=round(other_ws_range / stepsize + 1),\n endpoint=True,\n )\n w3 = other_ws_range - w2\n num = w2.shape[0]\n w1 = np.array([w] * num)\n ws = np.hstack(\n [w1.reshape([num, 1]), w2.reshape([num, 1]), w3.reshape([num, 1])]\n )\n if i == 0:\n ws_pairs = ws\n else:\n ws_pairs = np.vstack([ws_pairs, ws])\n else:\n raise Exception(f\"{n_objectives} objectives are not supported.\")\n\n assert all(np.round(np.sum(ws_pairs, axis=1), 10) == 1)\n return np.array(ws_pairs)" } ]
import_statement:
from typing import Dict, Optional

from ....utils.logging import logger
from ...concepts import Constraint, Objective
from ...concepts.problem import MOProblem
from ...moo.weighted_sum import WeightedSum
from ...soo.grid_search_solver import GridSearchSolver
from ...soo.mogd import MOGD
from ...soo.random_sampler_solver import RandomSamplerSolver
from ...soo.so_solver import SOSolver
from ...utils.exceptions import NoSolutionError
from ...utils.moo_utils import even_weights
import numpy as np
import pytest
import torch as th

token_num: 11,710
class TestWeightedSum: @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=30)), ], ) def test_solve_without_input_parameters( self, inner_solver: SOSolver, simple_problem: MOProblem ) -> None: """solve a dummy minimization problem with 2 objectives and 1 constraint""" ws_pairs = np.array([[0.3, 0.7], [0.6, 0.4]]) simple_problem.input_parameters = None ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=simple_problem, seed=0) np.testing.assert_array_almost_equal(po_objs, np.array([[0, 0.2]])) np.testing.assert_equal(po_vars, np.array({"v1": 0, "v2": 2})) @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=30)), ], ) def test_solve_with_input_parameters( self, inner_solver: SOSolver, simple_problem: MOProblem ) -> None: """solve a dummy minimization problem with 2 objectives and 1 constraint""" ws_pairs = np.array([[0.3, 0.7], [0.6, 0.4]]) ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=simple_problem, seed=0) np.testing.assert_almost_equal(po_objs, np.array([[1, 1.3]])) np.testing.assert_equal(po_vars, np.array([{"v1": 0, "v2": 3}])) @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=1000)), ], ) def test_solver_with_two_obj_problem( self, inner_solver: SOSolver, two_obj_problem: MOProblem ) -> None: ws_pairs = np.array( [ [0.3, 0.7], [0.6, 0.4], [0.1, 0.9], [0.2, 0.8], [0.4, 0.6], [0.5, 0.5], ] ) ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=two_obj_problem, seed=0) np.testing.assert_almost_equal(po_objs, np.array([[0, 0]]), decimal=5) np.testing.assert_almost_equal(po_vars[0]["v1"], 0.0, decimal=3) assert po_vars[0]["v2"] == 1.0 @pytest.mark.parametrize( "strict_rounding", [ True, False, ], ) def test_solver_with_two_obj_problem_mogd( self, strict_rounding: bool, two_obj_problem: MOProblem ) -> None: inner_solver = MOGD( MOGD.Params( learning_rate=0.1, max_iters=100, patience=20, multistart=2, batch_size=10, strict_rounding=strict_rounding, ) )
class TestWeightedSum: @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=30)), ], ) def test_solve_without_input_parameters( self, inner_solver: SOSolver, simple_problem: MOProblem ) -> None: """solve a dummy minimization problem with 2 objectives and 1 constraint""" ws_pairs = np.array([[0.3, 0.7], [0.6, 0.4]]) simple_problem.input_parameters = None ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=simple_problem, seed=0) np.testing.assert_array_almost_equal(po_objs, np.array([[0, 0.2]])) np.testing.assert_equal(po_vars, np.array({"v1": 0, "v2": 2})) @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=30)), ], ) def test_solve_with_input_parameters( self, inner_solver: SOSolver, simple_problem: MOProblem ) -> None: """solve a dummy minimization problem with 2 objectives and 1 constraint""" ws_pairs = np.array([[0.3, 0.7], [0.6, 0.4]]) ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=simple_problem, seed=0) np.testing.assert_almost_equal(po_objs, np.array([[1, 1.3]])) np.testing.assert_equal(po_vars, np.array([{"v1": 0, "v2": 3}])) @pytest.mark.parametrize( "inner_solver", [ GridSearchSolver(GridSearchSolver.Params(n_grids_per_var=[2, 7])), RandomSamplerSolver(RandomSamplerSolver.Params(n_samples_per_param=1000)), ], ) def test_solver_with_two_obj_problem( self, inner_solver: SOSolver, two_obj_problem: MOProblem ) -> None: ws_pairs = np.array( [ [0.3, 0.7], [0.6, 0.4], [0.1, 0.9], [0.2, 0.8], [0.4, 0.6], [0.5, 0.5], ] ) ws_algo = WeightedSum( WeightedSum.Params( so_solver=inner_solver, ws_pairs=ws_pairs, ) ) po_objs, po_vars = ws_algo.solve(problem=two_obj_problem, seed=0) np.testing.assert_almost_equal(po_objs, np.array([[0, 0]]), decimal=5) np.testing.assert_almost_equal(po_vars[0]["v1"], 0.0, decimal=3) assert po_vars[0]["v2"] == 1.0 @pytest.mark.parametrize( "strict_rounding", [ True, False, ], ) def test_solver_with_two_obj_problem_mogd( self, strict_rounding: bool, two_obj_problem: MOProblem ) -> None: inner_solver = MOGD( MOGD.Params( learning_rate=0.1, max_iters=100, patience=20, multistart=2, batch_size=10, strict_rounding=strict_rounding, ) )
next_line: ws_pairs = even_weights(0.1, 2)
gold_snippet_index: 10
created_at: 2023-12-20 09:10:42+00:00
level: 16k
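As a concrete check, the next_line above can be worked out directly from the even_weights snippet in this record's context: with stepsize=0.1 and n_objectives=2 it yields 11 weight pairs (up to floating-point rounding), each row summing to 1, which the test then feeds to WeightedSum exactly as the earlier test methods do:

ws_pairs = even_weights(0.1, 2)
# -> array([[0. , 1. ],
#           [0.1, 0.9],
#           ...
#           [0.9, 0.1],
#           [1. , 0. ]])   shape (11, 2), rows sum to 1
ws_algo = WeightedSum(
    WeightedSum.Params(so_solver=inner_solver, ws_pairs=ws_pairs)
)
po_objs, po_vars = ws_algo.solve(problem=two_obj_problem, seed=0)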
repo_name: XLearning-SCU/2023-TPAMI-SMILE
file_path: Net.py
[ { "identifier": "get_dist_release", "path": "DistComput.py", "snippet": "def get_dist_release(loader, dist_path):\r\n if not os.path.exists(dist_path):\r\n # loader = test_loader\r\n num_data = [10]\r\n with torch.no_grad():\r\n dist_list = [[] for i in range(len(num_data))]\r\n for j, data_t in enumerate(loader, 0):\r\n # get all inputs\r\n fea0, fea1, class_labels0, class_labels1, mask, is_pair, idx = data_t\r\n inputs_t = fea0.cuda()\r\n # inputs_t = torch.cat([fea0,fea1]).cuda()\r\n # labels_t = torch.cat([class_labels0,class_labels1]).cuda()\r\n # inputs_t, _, labels_t, _ = data_t\r\n # inputs_t, labels_t = inputs_t.cuda(), labels_t.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n inputs_t = fea1.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n for ii in range(len(num_data)):\r\n DirectoryOperator(dist_path).make_fold()\r\n np.savetxt(dist_path, np.array(dist_list[ii]))\r\n\r\n dist = torch.from_numpy(\r\n np.loadtxt(\r\n dist_path\r\n ).astype(np.float32)\r\n )\r\n return dist\r" }, { "identifier": "get_nearest_k", "path": "_Utils/Calculator.py", "snippet": "def get_nearest_k(h0, h1, k=1, sp_size=1000):\r\n hh0 = h0.half()\r\n hh1 = h1.half()\r\n split = int(np.ceil(len(hh0) / sp_size))\r\n near = []\r\n for i in range(split):\r\n dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)\r\n nearest = torch.argsort(dist, dim=1)[:, :k]\r\n near.append(nearest)\r\n nearest = torch.cat(near)\r\n return nearest\r" }, { "identifier": "update_log", "path": "_Utils/Logs.py", "snippet": "def update_log(dic, path='../log/res.csv'):\r\n index = 'Epoch'\r\n val = []\r\n name = []\r\n for na, v in dic.items():\r\n val.append(v)\r\n name.append(na)\r\n dt = pd.DataFrame([val], columns=name)\r\n dt = dt.set_index(index)\r\n if os.path.exists(path):\r\n dt_old = pd.read_csv(path, index_col=index)\r\n dt = merge_csv(dt_old, dt)\r\n DirectoryOperator(path).make_fold()\r\n dt.to_csv(path)\r" }, { "identifier": "visualize2", "path": "_Utils/Scatter.py", "snippet": "def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):\r\n fv = feature_vec.reshape((len(feature_vec), -1))\r\n for perplexity in []:# 50\r\n vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(\r\n np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)\r\n )\r\n for s in [5]:\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea_multi,\r\n fig_path='{}Multi.svg'.format(prefix2),\r\n label_color=type_vec[group_vec == 0],\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n\r\n for perplexity in [50]:\r\n vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)\r\n for s in [5]: # 5\r\n prefix2 = prefix + 
'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.svg'.format(prefix2),\r\n label_color=type_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n # visualize_scatter(vis_fea,\r\n # fig_path='{}Cluster.svg'.format(prefix),\r\n # label_color=pred_vec,\r\n # label_shape=type_vec,\r\n #\r\n # )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.svg'.format(prefix2),\r\n label_color=group_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r" }, { "identifier": "visualize", "path": "_Utils/Visualize.py", "snippet": "def visualize(feature_vec, type_vec, group_vec, pred_vec, prefix='../Visualization/E{:03d}'.format(0)):\r\n vis_fea = tsne(feature_vec)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.jpg'.format(prefix),\r\n label_color=type_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Cluster.jpg'.format(prefix),\r\n label_color=pred_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.jpg'.format(prefix),\r\n label_color=group_vec,\r\n label_shape=type_vec,\r\n )\r" }, { "identifier": "visual_matrix_console", "path": "_Utils/Visualize.py", "snippet": "def visual_matrix_console(x):\r\n if len(x.shape) <= 2:\r\n x = x.reshape((*x.shape, 1))\r\n base_wid = int(np.log10(np.max(x) + 0.5)) + 1\r\n head_wid = x.shape[2] * (1 + base_wid)\r\n head_sep = int(head_wid // 2) + 1\r\n print('t\\\\c ', end='')\r\n for i in range(x.shape[1]):\r\n print(('{:' + '{}'.format(head_sep) + 'd}').format(i), end=' ' * (head_wid - head_sep))\r\n print()\r\n for i, line in enumerate(x):\r\n print('{:2d}: '.format(i), end='')\r\n for cl in line:\r\n sg = True\r\n for g in cl:\r\n if sg:\r\n sg = False\r\n else:\r\n print(' ', end='')\r\n if g != 0:\r\n # print('base_wid == {}'.format(base_wid))\r\n # print('g == {}'.format(g))\r\n print(('{:' + str(base_wid) + 'd}').format(g), end='')\r\n else:\r\n print(' ' * base_wid, end='')\r\n print('|', end='')\r\n print()\r" }, { "identifier": "visualize_image", "path": "_Utils/Visualize.py", "snippet": "def visualize_image(x, verbose=0, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param show:\r\n :param fig_path:\r\n :param x:\r\n (row, line, pic_h, pic_w) or (row, line, pic_h, pic_w, pic_c), pic_c = 1,3,4\r\n :return:\r\n \"\"\"\r\n x = np.asarray(x)\r\n if verbose:\r\n print('img.min() == {}'.format(np.min(x)))\r\n print('img.max() == {}'.format(np.max(x)))\r\n x -= np.min(x)\r\n x /= np.max(x)\r\n row, line = x.shape[:2]\r\n w, h = x.shape[1] * x.shape[3] / 90, x.shape[0] * x.shape[2] / 90\r\n plt.figure(figsize=(w, h)) # w, h\r\n count = 0\r\n for rx in x:\r\n for image in rx:\r\n count += 1\r\n plt.subplot(row, line, count)\r\n plt.imshow(image, cmap='gray', )\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0.1 / h, wspace=0.1 / w)\r\n\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r" }, { "identifier": "plot_heat_map", "path": "_Utils/Visualize.py", "snippet": "def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param z: z[i,j] shown in i-th row, j-th line\r\n :param xlabel:\r\n :param ylabel:\r\n :param show:\r\n :param fig_path:\r\n :return:\r\n \"\"\"\r\n left = 0.15\r\n right = 1\r\n top = 0.95\r\n bottom = 0.15\r\n w, h 
= z.shape\r\n plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n\r\n # plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n # plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\r\n\r\n if xticks is not None:\r\n plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)\r\n if yticks is not None:\r\n plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))\r\n for i in range(z.shape[0]):\r\n for j in range(z.shape[1]):\r\n # plt.text(j, i, accs[i, j].round(2), ha=\"center\", va=\"center\", color=\"b\", fontsize=12,\r\n # fontname='Times New Roman')\r\n plt.text(j, i, z[i, j], ha=\"center\", va=\"center\")\r\n\r\n if xlabel is not None:\r\n plt.xlabel(xlabel)\r\n if ylabel is not None:\r\n plt.ylabel(ylabel)\r\n if title is not None:\r\n plt.title(title)\r\n plt.imshow(z, interpolation='nearest', aspect='auto')\r\n\r\n plt.colorbar()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r" }, { "identifier": "TimeOperator", "path": "_Utils/TimeOperator.py", "snippet": "class TimeOperator:\r\n def __init__(self):\r\n self.time_buffer = None\r\n self.time_record = 0\r\n self.time_sum = 0\r\n self.time_count = 0\r\n\r\n def time(self, output=False, promt=''):\r\n if self.time_buffer is None:\r\n self.time_buffer = time()\r\n else:\r\n self.time_record = time() - self.time_buffer\r\n self.time_buffer = None\r\n self.time_sum += self.time_record\r\n self.time_count += 1\r\n if output:\r\n print('{}Time == {:7.05f}'.format(promt, self.time_record))\r\n\r\n def get_time_sum(self):\r\n return self.time_sum\r\n\r\n def show_time_sum(self):\r\n print('{:.02f}'.format(self.get_time_sum()))\r\n\r\n def get_fps(self):\r\n return self.time_count / self.time_sum\r\n\r\n def __get_speed(self, to_metric=None):\r\n speed = self.get_fps()\r\n metric = 'Second'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Minute'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Hour'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 24\r\n metric = 'Day'\r\n return speed, metric\r\n\r\n def show_process(self, process_now, process_total, name='Epoch'):\r\n if self.time_sum <= 0:\r\n return\r\n speed = self.time_sum / self.time_count\r\n print('{:<5s} [{:3.0f}/{:3.0f}] [{:8.02f}/{:8.02f}]: {:5.02f}({:5.02f}) '.format(\r\n name, process_now, process_total,\r\n process_now * speed, process_total * speed,\r\n self.time_record, speed\r\n ))\r\n\r\n def show_speed(self):\r\n speed, metric = self.__get_speed()\r\n print('{:4.01f} Frames/{}'.format(speed, metric))\r" }, { "identifier": "DirectoryOperator", "path": "_Utils/DirectoryOperator.py", "snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.dirname(self.directory)))\r\n os.makedirs(os.path.dirname(self.directory), exist_ok=True)\r\n\r\n def modification_time(self):\r\n if os.path.exists(self.directory):\r\n return os.path.getmtime(self.directory)\r\n else:\r\n warnings.warn('Time_now is returned since the modification time for non-exist file is not available. 
File: {}'.format(self.directory))\r\n return time.time()\r" }, { "identifier": "get_clusters", "path": "DataSetMaster/dataset.py", "snippet": "def get_clusters(args):\n item_path = os.path.join(path_operator.get_checkpoint_path(level=1), 'Items0321')\n file_mnist_test = os.path.join(item_path, 'mnist_test_clusters89.67.txt')\n file_mnist_train = os.path.join(item_path, 'MnistTrain94.31B256.txt')\n file_amazon = os.path.join(item_path, 'amazon72.81B032ReValue.txt')\n file_webcam = os.path.join(item_path, 'webcamOurLoaderRevalveBatchWiseB032_84.03.txt')\n file_usps = os.path.join(item_path, 'usps_train_clusters85.10.txt')\n root_har = os.path.join(item_path, 'HAR')\n root_mtfl = os.path.join(item_path, 'MTFL')\n\n if args.dataset == 'MNISTUSPS': # 87.75 93.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_usps,\n ]\n elif args.dataset == 'ReverseMNIST': # 89.67 94.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_mnist,\n ]\n elif args.dataset == 'Office': # 75.28\n file_list = [\n file_amazon,\n file_webcam,\n ]\n elif args.dataset == 'MTFL':\n file_list = np.sort([os.path.join(root_mtfl, f) for f in os.listdir(root_mtfl) if f.endswith('txt')])\n elif args.dataset == 'HAR': # 81.70\n file_list = np.sort([os.path.join(root_har, f) for f in os.listdir(root_har) if f.endswith('txt')])\n else:\n raise NotImplementedError(\"\")\n\n def debug(x):\n print(x.shape)\n return x\n\n clusters = torch.cat(\n [debug(torch.from_numpy(np.loadtxt(c).astype(np.float32)).long()) for c in file_list],\n dim=0,\n ).cuda()\n return clusters" }, { "identifier": "svm_classify", "path": "classification.py", "snippet": "def svm_classify(data, data_gt, label, test_prop, C):\n \"\"\"\n trains a linear SVM on the data\n input C specifies the penalty factor of SVM\n \"\"\"\n seed = random.randint(0, 1000)\n train_idx, test_idx = TT_split(data.shape[1], test_prop, seed)\n train_data = np.concatenate([data[0][train_idx], data[1][train_idx]], axis=1)\n test_data = np.concatenate([data_gt[0][test_idx], data_gt[1][test_idx]], axis=1)\n test_label = label[test_idx]\n train_label = label[train_idx]\n\n # print('training SVM...')\n clf = svm.LinearSVC(C=C, dual=False)\n clf.fit(train_data, train_label.ravel())\n\n p = clf.predict(test_data)\n test_acc = accuracy_score(test_label, p)\n\n return test_acc" }, { "identifier": "UMAP", "path": "evaluate.py", "snippet": "def UMAP(feature_vec, type_vec, group_vec, pred_vec, n_type, n_batch, args, epoch, dst_root='../Visualization'):\n t = time.time()\n # print(\"Performing UMAP Visualization...\")\n # print('feature_vec.shape == {}'.format(feature_vec.shape))\n sc.set_figure_params(figsize=(4, 4), dpi=300)\n\n # type_vec = pd.DataFrame(type_vec)\n # for key in cell_type_dict.keys():\n # type_vec.replace(key, cell_type_dict[key], inplace=True)\n # group_vec = pd.DataFrame(group_vec)\n # for key in batch_dict.keys():\n # batch_vec.replace(key, batch_dict[key], inplace=True)\n\n adata = sc.AnnData(feature_vec)\n # print('adata.shape == {}'.format(adata.shape))\n sc.pp.neighbors(adata)\n adata.obs['cluster'] = pd.DataFrame(pred_vec).values.astype(np.str_)\n adata.obs['type'] = pd.DataFrame(type_vec).values.astype(np.str_)\n adata.obs['group'] = pd.DataFrame(group_vec).values.astype(np.str_)\n\n sc.tl.umap(adata)\n sc.pl.umap(adata,\n color=['cluster'],\n palette=sns.color_palette(\"husl\", n_type),\n 
save='E{:03d}UmapCluster{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['type'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapType{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['group'],\n palette=sns.color_palette(\"hls\", n_batch),\n save='E{:03d}UmapGroup{}.png'.format(epoch, str(args.dataset)),\n show=False)\n roott = './figures/'\n for root, dirs, files in os.walk(roott):\n # print(root)\n # print(dirs)\n # print(files)\n for f in files:\n # print(os.path.join('../Visualization', f))\n FileOperator(\n os.path.join(root, f)\n ).rename(\n os.path.join(dst_root, f.replace('umapE', 'E')),\n auto_rename=False\n )\n if PrintTimer:\n print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(\n time.time() - t,\n feature_vec.shape,\n ))" }, { "identifier": "evaluate2", "path": "evaluate.py", "snippet": "def evaluate2(feature_vec, pred_vec, type_vec, group_vec):\n nmi, ari, acc, pred_adjusted = cluster_metrics(type_vec, pred_vec)\n gs = np.unique(group_vec)\n ts = np.unique(type_vec)\n class_num = len(ts)\n group_num = len(gs)\n if group_vec is not None and group_num > 1:\n balance, entro = my_balance(pred_vec, group_vec, cluster_num=np.unique(type_vec).shape[0],\n group_num=np.unique(group_vec).shape[0])\n O = torch.zeros((class_num, group_num)).cuda()\n\n for b in gs:\n ind_g = b == group_vec\n pred_vec_g = pred_vec[ind_g]\n for t in ts:\n O[t, b] = np.sum(pred_vec_g == t)\n O += 1e-6\n O = (O / torch.sum(O))\n NmiFair = normalized_mutual_information(O).cpu().numpy()\n Fmeasure = FMeasure(beta=1)(acc, NmiFair)\n else:\n balance, entro = 0, 0\n NmiFair = 0\n Fmeasure = 0\n entro_v = np.mean(entro)\n global BestAcc, BestAri, BestNmi, BestBalance, BestEntropy, BestFairness, BestNmiFair, BestFmeasure\n if BestAcc < acc:\n BestAcc = acc\n if BestAri < ari:\n BestAri = ari\n if BestNmi < nmi:\n BestNmi = nmi\n if BestBalance < balance:\n BestBalance = balance\n # if BestFairness < fairness:\n # BestFairness = fairness\n if BestNmiFair < NmiFair:\n BestNmiFair = NmiFair\n if BestFmeasure < Fmeasure:\n BestFmeasure = Fmeasure\n if BestEntropy < entro_v:\n BestEntropy = entro_v\n\n print(\n 'NMI={:5.02f}|{:5.02f}, ARI={:5.02f}|{:5.02f}, ACC={:5.02f}|{:5.02f}, Balance={:5.02f}|{:5.02f}, NmiFair={:5.02f}|{:5.02f}, Fmeasure={:5.02f}|{:5.02f}, Entropy={:5.02f}|{:5.02f}[{}],'.format(\n nmi * 100, BestNmi * 100,\n ari * 100, BestAri * 100,\n acc * 100, BestAcc * 100,\n balance * 100, BestBalance * 100,\n # fairness * 100, BestFairness * 100,\n NmiFair * 100, BestNmiFair * 100,\n Fmeasure * 100, BestFmeasure * 100,\n entro_v, BestEntropy, entro\n )\n )\n met = {\n 'nmi' : nmi,\n 'ari' : ari,\n 'acc' : acc,\n 'balance' : balance,\n 'NmiFair' : NmiFair,\n 'Fmeasure': Fmeasure,\n }\n return pred_adjusted, met\n # tqdm.write('NMI=%.4f, ACC=%.4f, ARI=%.4f' % (nmi, acc, ari), end='')\n # if fair_metric:\n # kl, ari_b = fair_metrics(feature_vec, group_vec, pred_vec, type_vec)\n # print(', KL=%.4f, ARI_b=%.4f' % (kl, ari_b), end='')\n # tqdm.write('')" }, { "identifier": "visual_image_scatter", "path": "figures/ScatterMaster.py", "snippet": "def visual_image_scatter():\r\n np_path = os.path.join(\r\n 'D:/VirtualMachine/Codes/230904/SMAIL_RunSet_Visual/ --QuickConfig C100 --VisualFreq 5 --VisualRandom 1 --dataset NoisyMNIST30000 --seed 1999 --train_epoch 100/Checkpoints/Epoch099.npz')\r\n # np_path_row = os.path.join(root, np_paths[np_names.index(np_tag)], 'NpPoints', np_epoch)\r\n\r\n data = 
np.load(np_path, allow_pickle=False)\r\n data_vec = data['data_vec']\r\n feature_vec = data['feature_vec']\r\n group_vec = data['group_vec']\r\n type_vec = data['type_vec']\r\n\r\n # visualize_image(x=[\r\n # [it.reshape([28, 28]) for it in data_vec[:10]],\r\n # [it.reshape([28, 28]) for it in data_vec[10:20]],\r\n # [it.reshape([28, 28]) for it in data_vec[20:30]],\r\n # ], show=True)\r\n\r\n DrawMax = 3000\r\n if len(feature_vec) > DrawMax:\r\n it = np.arange(len(feature_vec))\r\n np.random.shuffle(it)\r\n ind = it[:DrawMax]\r\n feature_vec = feature_vec[ind]\r\n type_vec = type_vec[ind]\r\n group_vec = group_vec[ind]\r\n data_vec = data_vec[ind]\r\n vis_fea = TSNE(perplexity=50).fit_transform(feature_vec)\r\n\r\n _, ax = plt.subplots(figsize=(5 * 1 * 2, 5 * 1 * 2 / 1.6))\r\n\r\n label_color = np.unique(type_vec)\r\n color_num = len(np.unique(type_vec))\r\n # if color_num <= 2:\r\n # cmap = None\r\n if color_num <= 10:\r\n cmap = 'tab10'\r\n elif color_num <= 20:\r\n cmap = 'tab20'\r\n else:\r\n cmap = 'gist_ncar'\r\n for digit in np.unique(type_vec):\r\n ax.scatter(\r\n *vis_fea[type_vec == digit].T,\r\n # marker=f\"${digit}$\",\r\n s=0.5,\r\n # color=plt.cm.Dark2(digit),\r\n alpha=0.7,\r\n c=type_vec[type_vec == digit],\r\n cmap=cmap,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(0, np.min(label_color)),\r\n zorder=2,\r\n )\r\n w = int(np.sqrt(len(data_vec[0])))\r\n h = w\r\n shown_images = np.array([[1.0, 1.0]]) # just something big\r\n for i in range(data_vec.shape[0]):\r\n # plot every digit on the embedding\r\n # show an annotation box for a group of digits\r\n dist = np.sum((vis_fea[i] - shown_images) ** 2, 1)\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n shown_images = np.concatenate([shown_images, [vis_fea[i]]], axis=0)\r\n # img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, )\r\n img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, zoom=0.5)\r\n # img.ti\r\n imagebox = offsetbox.AnnotationBbox(\r\n img, # [w, h, 3]\r\n vis_fea[i],\r\n pad=0,\r\n frameon=False\r\n )\r\n imagebox.set(zorder=1)\r\n ax.add_artist(imagebox)\r\n\r\n ax.set_title('title')\r\n ax.axis(\"off\")\r\n plt.tight_layout()\r\n plt.savefig('D:/Pengxin/Temp/tmp.pdf')\r\n plt.show()\r\n\r\n print()\r\n pass\r" } ]
import math
import os
import time
import warnings
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import evaluate
import faiss
import scipy.io as sio
from torch import nn
from torch.autograd import Variable
from DistComput import get_dist_release
from _Utils.Calculator import get_nearest_k
from _Utils.Logs import update_log
from _Utils.Scatter import visualize2
from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map
from _Utils import TimeOperator, DirectoryOperator
from DataSetMaster.dataset import get_clusters
from classification import svm_classify
from evaluate import UMAP, evaluate2
from sklearn import metrics
from munkres import Munkres
from figures.ScatterMaster import visual_image_scatter
10,809
elif args.reAlign == 'Copy': if torch.sum(to_realign): h1[to_realign] = h0[to_realign] # class_labels1[is_pair == 0] = class_labels0[is_pair == 0] elif args.reAlign == 'KnnMapMean': if torch.sum(to_realign): targ_v1 = h1[is_pair] nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK) h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = ... elif args.reAlign == 'Ignore': pass else: raise NotImplementedError('') if args.Rev: fea0_rec, fea1_rec = self.decode([h1, h0]) else: fea0_rec, fea1_rec = self.decode([h0, h1]) # if len(fea0_rec[0]) == len(fea1_rec[0]): # fea_rec = torch.concat([fea0_rec, fea1_rec]) # fea = torch.concat([fea0, fea1]) # mask_c = torch.concat([mask[:, 0], mask[:, 1]]) # if torch.sum(mask_c == 0): # rnmse_vec[0].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy()) # if torch.sum(mask_c == 1): # rnmse_vec[1].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy()) # else: # if torch.sum(mask == 0): # n0_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy() # n0_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy() # rnmse_vec[0].extend(n0_v0) # rnmse_vec[0].extend(n0_v1) # if torch.sum(mask == 1): # n1_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy() # n1_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= 
n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg')
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num): v = np.zeros((class_num, class_num, group_num), dtype=int) for t, c, g in zip(type_vec, pred_vec, group_vec): v[t, c, g] += 1 visual_matrix_console(x=v) def kmeans(feature_vec, class_num): d = feature_vec.shape[1] kmeans = faiss.Clustering(d, class_num) kmeans.verbose = False kmeans.niter = 300 kmeans.nredo = 10 # kmeans.spherical = True # if LimitKmeans: # kmeans.max_points_per_centroid = 1000 # kmeans.min_points_per_centroid = 10 res = faiss.StandardGpuResources() cfg = faiss.GpuIndexFlatConfig() cfg.useFloat16 = True cfg.device = 0 index = faiss.GpuIndexFlatL2(res, d, cfg) # print(feature_vec.shape) kmeans.train(feature_vec, index) centroids = faiss.vector_to_array(kmeans.centroids).reshape(class_num, d) return centroids def show_distribution(cluster_vec, group_vec, class_num, group_num): for it in np.arange(group_num): print('{:4d}, '.format(it), end='') print('') cluster_group = torch.zeros((class_num, group_num), dtype=torch.int) for i, j in zip(cluster_vec, group_vec): cluster_group[i, j] += 1 # cluster_group = cluster_group[torch.argsort(torch.sum(cluster_group, dim=1))] for line in cluster_group: print('{:4d}: '.format(torch.sum(line)), end='') for it in line: print('{:4d}, '.format(it), end='') print('') def save_checkpoint(state, epoch): """ it has been trained for *epoch* epochs """ filename = 'Epoch{:03d}.checkpoint'.format(epoch) checkpoint_dir = os.path.join( os.path.dirname(os.getcwd()), 'Checkpoints', filename ) DirectoryOperator.FoldOperator(directory=checkpoint_dir).make_fold() if os.path.exists(checkpoint_dir): warnings.warn('Checkpoint exist and been replaced.({})'.format(checkpoint_dir)) print('Save check point into {}'.format(checkpoint_dir)) torch.save(state, checkpoint_dir) def get_ffn(dims, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] layers.append(nn.Linear(in_dim, out_dim)) if with_bn: layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) def get_cov(dims, strides, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] stride = strides[ind] # layers.append(nn.Linear(in_dim, out_dim)) if stride >= 0: layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1)) else: layers.append(nn.ConvTranspose2d( in_dim, out_dim, kernel_size=3, stride=-stride, padding=1, output_padding=0 if stride == -1 else 1)) if with_bn: # layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.BatchNorm2d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) class Net(nn.Module): def __init__(self, args, in_dims, class_num, group_num): super(Net, self).__init__() self.encoder_adaption = nn.ModuleList([ get_ffn([in_dims[i], 1024], with_bn=args.BatchNormType[0] == '1', drop_out=args.Dropout) for i in range(group_num if args.GroupWiseLayer[0] == '1' else 1)]) self.encoder = nn.ModuleList([ get_ffn([1024, 1024, 512], with_bn=args.BatchNormType[1] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[1] == '1' else 1)]) if args.representation_dim == 0: args.representation_dim = class_num self.class_num = class_num self.group_num = group_num 
self.pred_cac = None self.pred_center_cac = None if args.ElActivationType == 'None': el_activation_ = [] elif args.ElActivationType == 'Normalize': el_activation_ = [] elif args.ElActivationType == 'BnNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim)] elif args.ElActivationType == 'BnReNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] elif args.ElActivationType == 'BnRe': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] else: raise NotImplementedError('') self.el_activation_ = el_activation_ self.encoder_linear = nn.ModuleList([ get_ffn([512, 256], with_bn=args.BatchNormType[2] == '1', drop_out=args.Dropout, last_layers=[nn.Linear(256, args.representation_dim)] + self.el_activation_) for _ in range(group_num if args.GroupWiseLayer[2] == '1' else 1)]) dec_in = args.representation_dim if args.McDecoder: dec_in *= group_num self.dec_in = dec_in self.decoder_linear = nn.ModuleList([ get_ffn([self.dec_in, 256, 512], with_bn=args.BatchNormType[3] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[3] == '1' else 1)]) if args.ActivationType == 'None': final_activation_ = [] elif args.ActivationType == 'Sigmoid': final_activation_ = [nn.Sigmoid()] elif args.ActivationType == 'Tanh': final_activation_ = [nn.Tanh()] else: raise NotImplementedError('') self.final_activation_ = final_activation_ self.decoder = nn.ModuleList([ get_ffn([512, 1024, 1024], with_bn=args.BatchNormType[4] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[4] == '1' else 1)]) self.decoder_adaption = nn.ModuleList([ get_ffn([], last_layers=[nn.Linear(1024, in_dims[i])] + self.final_activation_) for i in range(group_num if args.GroupWiseLayer[5] == '1' else 1)]) self.args = args self.in_dims = in_dims # def update_cluster_center(self, center): # self.cluster_centers = F.normalize(torch.from_numpy(center), dim=1).cuda() def forward(self, x, **kwargs): return self.decode(self.encode([x])) def encode(self, xs: list): hs = [] for g, x in enumerate(xs): if self.args.noise_type == 'None': pass elif self.args.noise_type == 'Drop': x = x * (Variable(x.data.new(x.size()).normal_(0, 0.1)) < self.args.noise_weight).type_as(x) elif self.args.noise_type == 'Add': x = x + Variable(x.data.new(x.size()).normal_(0, self.args.noise_weight)).type_as(x) else: raise NotImplementedError('') if len(x) != 0: if len(x) == 1: x = torch.concat([x, x]) # print(x.shape) # x = x.view((len(x), -1)) # print(x.shape) x = self.encoder_adaption[g if self.args.GroupWiseLayer[0] == '1' else 0](x) x = self.encoder[g if self.args.GroupWiseLayer[1] == '1' else 0](x) x = self.encoder_linear[g if self.args.GroupWiseLayer[2] == '1' else 0](x) if len(x) == 1: x = x[[0]] if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: x = F.normalize(x, dim=1) else: x = torch.zeros([0, self.args.representation_dim], device=torch.device('cuda:0')) hs.append(x) return hs def soft_ass(self, h, centroids): if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: return h @ centroids.T else: dst = torch.cdist(h, centroids) # return (torch.mean(dst) - dst) / (torch.amax(dst) - torch.amin(dst)) * 2 return -dst / 2 # def encode_class(self, hs): # cs = [] # for h in hs: # c = h @ self.cluster_centers.T # cs.append(c) # return cs def decode(self, hs): xs = [] for g, h in enumerate(hs): if self.args.McDecoder: h = torch.cat(hs, dim=1) if len(h) != 0: if len(h) == 1: h = torch.concat([h, h]) h = self.decoder_linear[g if 
self.args.GroupWiseLayer[3] == '1' else 0](h) h = self.decoder[g if self.args.GroupWiseLayer[4] == '1' else 0](h) h = self.decoder_adaption[g if self.args.GroupWiseLayer[5] == '1' else 0](h) if len(h) == 1: h = h[[0]] else: h = torch.zeros([0, self.in_dims[g]], device=torch.device('cuda:0')) xs.append(h) return xs def run(self, epochs, train_dataloader, test_dataloader, args): # if args.loss_self_cons: # clusters = get_clusters(args=args) optimizer_g = torch.optim.Adam( self.parameters(), lr=args.LearnRate, betas=(args.betas_a, args.betas_v), weight_decay=args.WeightDecay ) mse_loss = nn.MSELoss().cuda() timer_all = TimeOperator.Timer() timer_train = TimeOperator.Timer() timer_save = TimeOperator.Timer() ce_loss = nn.CrossEntropyLoss().cuda() type_detail_shown = False start_epoch = 0 if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) # if args.gpu is None: # checkpoint = torch.load(args.resume) # else: # # Map model to be loaded to specified single gpu. # loc = 'cuda:{}'.format(args.gpu) # checkpoint = torch.load(args.resume, map_location=loc) start_epoch = checkpoint['epoch'] self.load_state_dict(checkpoint['state_dict']) optimizer_g.load_state_dict(checkpoint['optimizer']['optimizer_g']) # self.__dict__ = checkpoint['self_dic'] print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch'])) # self.args = args # warnings.warn('This is not equal to start from the beginning due to different rands states.') # else: raise NotImplementedError("=> no checkpoint found at '{}'".format(args.resume)) if args.CodeTest: args.train_epoch = start_epoch + 1 epochs = start_epoch + 1 best_acc = 0 for epoch in range(start_epoch, epochs): if (epoch + 1) <= args.LearnRateWarm: lr = args.LearnRate * (epoch + 1) / args.LearnRateWarm else: if args.LearnRateDecayType == 'None': lr = args.LearnRate elif args.LearnRateDecayType == 'Exp': lr = args.LearnRate * ((1 + 10 * (epoch + 1 - args.LearnRateWarm) / ( args.train_epoch - args.LearnRateWarm)) ** -0.75) elif args.LearnRateDecayType == 'Cosine': lr = args.LearnRate * 0.5 * (1. 
+ math.cos( math.pi * (epoch + 1 - args.LearnRateWarm) / (args.train_epoch - args.LearnRateWarm))) else: raise NotImplementedError('args.LearnRateDecayType') if lr != args.LearnRate: def adjust_learning_rate(optimizer): print('adjust_learning_rate: {}'.format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr adjust_learning_rate(optimizer_g) timer_all_time = time.time() # inf_t = time.time() # print('start epoch {}'.format(epoch)) self.eval() feature_vec, type_vec, group_vec = [], [], [] feature_vec_cluster = [] group_vec_cluster = [] feature_vec_classification = [] type_vec_cluster = [] data_vec = [] is_pair_all = [] timer_infer_data = TimeOperator.Timer() rnmse_vec = [[], []] # mask = 0 1 with torch.no_grad(): inf_data_t = time.time() for (fea0, fea1, class_labels0, class_labels1, mask, is_pair, index) in test_dataloader: timer_infer_data.update(time.time() - inf_data_t) # timer_infer_data.show(prefix='InferDataTime', total_count=len(test_dataloader), # print_end_time=False) fea0 = fea0.cuda() fea1 = fea1.cuda() if args.Rev: h1, h0 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != 1 - args.SingleView: mask[:, v] = 0 else: h0, h1 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != args.SingleView: mask[:, v] = 0 cluster_h0 = h0[mask[:, 0] == 1] cluster_h1 = h1[mask[:, 1] == 1] # if args.SingleView != -1: # mask[:, args.SingleView] = 0 # # if args.SingleView == 0: # # cluster_h1 = cluster_h1[[]] # # class_labels1 = class_labels1[[]] # # elif args.SingleView == 1: # # class_labels0 = class_labels0[[]] # # cluster_h0 = cluster_h0[[]] # # else: # # raise NotImplementedError('') is_pair_all.extend(is_pair) feature_vec_cluster.extend(torch.cat([cluster_h0, cluster_h1]).detach().cpu().numpy()) group_vec_cluster.extend(torch.concat((torch.zeros(len(cluster_h0), dtype=torch.int), torch.ones(len(cluster_h1), dtype=torch.int))).numpy()) type_vec_cluster.extend(torch.concat((class_labels0[mask[:, 0] == 1], class_labels1[mask[:, 1] == 1])).numpy()) feature_vec_classification.extend(torch.cat([h0, h1]).detach().cpu().numpy()) if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0: if torch.sum(torch.logical_not(torch.logical_or(mask[:, 1], mask[:, 0]))): raise NotImplementedError('存在一个pair两个模态都缺失') if args.reFill == 'Copy': if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = h1[mask[:, 0] == 0] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = h0[mask[:, 1] == 0] elif args.reFill == 'Center': # raise NotImplementedError('') if self.pred_center_cac is None: pass warnings.warn('self.pred_center_cac == None') else: centors = torch.zeros((len(mask), 2, len(self.pred_center_cac[0]))).cuda() centors[mask[:, 0] == 1, 0] = self.pred_center_cac[ self.pred_cac[:torch.sum(mask[:, 0] == 1)]] centors[mask[:, 1] == 1, 1] = self.pred_center_cac[ self.pred_cac[torch.sum(mask[:, 0] == 1):]] if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = centors[mask[:, 0] == 0, 1] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = centors[mask[:, 1] == 0, 0] elif args.reFill == 'KnnMapMean': if torch.sum(mask[:, 0] == 0): nearest = get_nearest_k(h1[mask[:, 0] == 0], h1[is_pair], args.reAlignK) h0p = h0[is_pair] h1[mask[:, 0] == 0] = torch.cat([torch.mean(h0p[ns], dim=0) for ns in nearest]) if torch.sum(mask[:, 1] == 0): nearest = get_nearest_k(h0[mask[:, 1] == 0], h0[is_pair], args.reAlignK) h1p = h1[is_pair] h1[mask[:, 1] == 0] = torch.cat([torch.mean(h1p[ns], dim=0) for ns in nearest]) # raise 
NotImplementedError('') elif args.reFill == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(mask[:, 1] == 0): hs0 = h0[mask[:, 1] == 0] he1 = h1[mask[:, 1] == 1] nearest = get_nearest_k(hs0, he1, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs0.cpu(), he1.cpu()), dim=1)[:, :args.reAlignK] h1[mask[:, 1] == 0] = torch.cat([torch.mean(he1[ns], dim=0) for ns in nearest]) # class_labels1[mask[:, 1] == 0] = class_labels1[mask[:, 1] == 1][nearest[:, 0]] if torch.sum(mask[:, 0] == 0): hs1 = h1[mask[:, 0] == 0] he0 = h0[mask[:, 0] == 1] nearest = get_nearest_k(hs1, he0, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs1.cpu(), he0.cpu()), dim=1)[:, :args.reAlignK] h0[mask[:, 0] == 0] = torch.cat([torch.mean(he0[ns], dim=0) for ns in nearest]) # class_labels0[mask[:, 0] == 0] = class_labels0[mask[:, 0] == 1][nearest[:, 0]] ############################################################### # 缺失补全, xi2 = mean(离xi1最近的k个view2的点) # fill_num = k # C = euclidean_dist(h0, h1) # row_idx = C.argsort() # col_idx = (C.t()).argsort() # # Mij denotes the flag of i-th sample in view 0 and j-th sample in view 1 # M = torch.logical_and((mask[:, 0].repeat(test_num, 1)).t(), mask[:, 1].repeat(test_num, 1)) # for i in range(test_num): # idx0 = col_idx[i, :][ # M[col_idx[i, :], i]] # idx for view 0 to sort and find the non-missing neighbors # idx1 = row_idx[i, :][ # M[i, row_idx[i, :]]] # idx for view 1 to sort and find the non-missing neighbors # if len(idx1) != 0 and len(idx0) == 0: # i-th sample in view 1 is missing # avg_fill = h1[idx1[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels1[idx1[0:fill_num]] == class_labels1[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = avg_fill # missing # elif len(idx0) != 0 and len(idx1) == 0: # avg_fill = h0[idx0[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels0[idx0[0:fill_num]] == class_labels0[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = avg_fill # missing # recover_out1[i, :] = h1[i, :] # elif len(idx0) != 0 and len(idx1) != 0: # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = h1[i, :] # else: # raise Exception('error') # if setting == 1: # align_out0.extend((recover_out0.cpu()).numpy()) # align_out1.extend((recover_out1.cpu()).numpy()) # continue # else: raise NotImplementedError('') to_realign = torch.logical_and(is_pair == 0, torch.logical_and(mask[:, 1], mask[:, 0])) if args.reAlign == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(to_realign): ha1 = h1[to_realign] nearest = get_nearest_k(h0[to_realign], ha1, args.reAlignK) # dist = torch.cdist(h0[to_realign].cpu(), ha1.cpu()) # nearest = torch.argsort(dist, dim=1)[:, :args.reAlignK] h1[to_realign] = torch.cat([torch.mean(ha1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]] elif args.reAlign == 'Copy': if torch.sum(to_realign): h1[to_realign] = h0[to_realign] # class_labels1[is_pair == 0] = class_labels0[is_pair == 0] elif args.reAlign == 'KnnMapMean': if torch.sum(to_realign): targ_v1 = h1[is_pair] nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK) h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = ... 
elif args.reAlign == 'Ignore': pass else: raise NotImplementedError('') if args.Rev: fea0_rec, fea1_rec = self.decode([h1, h0]) else: fea0_rec, fea1_rec = self.decode([h0, h1]) # if len(fea0_rec[0]) == len(fea1_rec[0]): # fea_rec = torch.concat([fea0_rec, fea1_rec]) # fea = torch.concat([fea0, fea1]) # mask_c = torch.concat([mask[:, 0], mask[:, 1]]) # if torch.sum(mask_c == 0): # rnmse_vec[0].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy()) # if torch.sum(mask_c == 1): # rnmse_vec[1].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy()) # else: # if torch.sum(mask == 0): # n0_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy() # n0_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy() # rnmse_vec[0].extend(n0_v0) # rnmse_vec[0].extend(n0_v1) # if torch.sum(mask == 1): # n1_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy() # n1_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = 
np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg')
visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg')
6
2023-12-21 08:50:36+00:00
16k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": ".venv/Lib/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n \"シ\",\n 
\"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n \"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n \"c\",\n 
\"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n \"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n \"h\",\n 
\"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}" }, { "identifier": "KO_NAMES", "path": ".venv/Lib/site-packages/charset_normalizer/constant.py", "snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}" }, { "identifier": "LANGUAGE_SUPPORTED_COUNT", "path": ".venv/Lib/site-packages/charset_normalizer/constant.py", "snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)" }, { "identifier": "TOO_SMALL_SEQUENCE", "path": ".venv/Lib/site-packages/charset_normalizer/constant.py", "snippet": "TOO_SMALL_SEQUENCE: int = 32" }, { "identifier": "ZH_NAMES", "path": ".venv/Lib/site-packages/charset_normalizer/constant.py", "snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}" }, { "identifier": "is_suspiciously_successive_range", "path": ".venv/Lib/site-packages/charset_normalizer/md.py", "snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) -> bool:\n \"\"\"\n Determine if two 
Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n return True" }, { "identifier": "CoherenceMatches", "path": ".venv/Lib/site-packages/charset_normalizer/models.py", "snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: 
CharsetMatch) -> None:\n def best(self) -> Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:" }, { "identifier": "is_accentuated", "path": ".venv/Lib/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n or \"WITH MACRON\" in description\n or \"WITH RING ABOVE\" in description\n )" }, { "identifier": "is_latin", "path": ".venv/Lib/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description" }, { "identifier": "is_multi_byte_encoding", "path": ".venv/Lib/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )" }, { "identifier": "is_unicode_range_secondary", "path": ".venv/Lib/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)" }, { "identifier": "unicode_range", "path": ".venv/Lib/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None" } ]
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .constant import ( FREQUENCIES, KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES, ) from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, )
11,400
def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values())
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
6
2023-12-16 04:12:01+00:00
16k
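The record above completes charset_normalizer/cd.py, whose helpers split a decoded string into per-Unicode-range layers and score candidate languages against per-language character frequency tables. Below is a minimal usage sketch of those helpers, assuming a standard charset_normalizer installation that exposes the functions exactly as shown in the snippets; the sample string and the number of printed candidates are illustrative only and are not part of the dataset record.

# Hedged sketch: exercising the cd.py helpers shown in the record above.
# Assumes `charset_normalizer` is installed and exports these names as in the snippets.
from charset_normalizer.cd import alpha_unicode_split, alphabet_languages

sample = "Dobro jutro, svijete! Καλημέρα κόσμε!"

# Group alphabetic characters into layers of mutually compatible Unicode ranges
# (for this sample: one Latin layer and one Greek layer).
layers = alpha_unicode_split(sample)

for layer in layers:
    # For each layer, list languages whose frequency tables overlap its characters,
    # ordered by overlap ratio; only the top few candidates are printed here.
    print(layer, "->", alphabet_languages(list(layer))[:3])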
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,375
csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell(
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell(
SimpleTableCell(
0
2023-10-25 02:50:01+00:00
16k
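The get_scenes_from_cuts helper in the scene_manager excerpt above turns a flat list of cut points into contiguous (start, end) scene spans. A minimal standalone sketch of that logic, using plain frame numbers in place of FrameTimecode objects (the cut positions and frame counts below are made up for illustration):

def scenes_from_cuts(cut_list, start_pos, end_pos):
    # Mirrors get_scenes_from_cuts(): no cuts means one scene spanning the whole input.
    if not cut_list:
        return [(start_pos, end_pos)]
    scene_list = []
    last_cut = start_pos
    for cut in cut_list:
        scene_list.append((last_cut, cut))
        last_cut = cut
    # The final scene runs from the last cut to the end of the video.
    scene_list.append((last_cut, end_pos))
    return scene_list

print(scenes_from_cuts([120, 480], start_pos=0, end_pos=900))
# [(0, 120), (120, 480), (480, 900)]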
EulerSearch/embedding_studio
embedding_studio/workers/fine_tuning/finetune_embedding_one_param.py
[ { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text\n 3. As a path to an image\n\n We provide an ability to use any query item. So, a user can specify any.\n\n \"\"\"\n\n def setup(self, clickstream_sessions: List[ClickstreamSession]):\n pass\n\n def __call__(self, query: QueryItem):\n return query" }, { "identifier": "RankingData", "path": "embedding_studio/embeddings/data/ranking_data.py", "snippet": "class RankingData:\n def __init__(self, clickstream: DatasetDict, items: DatasetDict):\n self.clickstream = clickstream\n self.items = items" }, { "identifier": "EmbeddingsModelInterface", "path": "embedding_studio/embeddings/models/interface.py", "snippet": "class EmbeddingsModelInterface(pl.LightningModule):\n def __init__(self, same_query_and_items: bool = False):\n \"\"\"In search we have two entities, which could be multi domain: query and search result (item).\n This is the interface we used in fine-tuning procedure.\n\n :param same_query_and_items: are query and items models acutally the same model (default: False)\n \"\"\"\n super(EmbeddingsModelInterface, self).__init__()\n self.same_query_and_items = same_query_and_items\n\n @abstractmethod\n def get_query_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def get_items_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def fix_query_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at a query model\n\n :param num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_query_model(self):\n \"\"\"Unfix all layers of a query model.\"\"\"\n\n @abstractmethod\n def fix_item_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at an item model\n\n :param num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_item_model(self):\n \"\"\"Unfix all layers of an item model.\"\"\"\n\n @abstractmethod\n def forward_query(self, query: Any) -> FloatTensor:\n pass\n\n @abstractmethod\n def forward_items(self, items: List[Any]) -> FloatTensor:\n pass" }, { "identifier": "EmbeddingsFineTuner", "path": "embedding_studio/embeddings/training/embeddings_finetuner.py", "snippet": "class EmbeddingsFineTuner(pl.LightningModule):\n def __init__(\n self,\n model: EmbeddingsModelInterface,\n items_storages: DatasetDict,\n query_retriever: QueryRetriever,\n loss_func: RankingLossInterface,\n fine_tuning_params: FineTuningParams,\n tracker: ExperimentsManager,\n metric_calculators: Optional[List[MetricCalculator]] = None,\n ranker: Callable[\n [FloatTensor, FloatTensor], FloatTensor\n ] = COSINE_SIMILARITY,\n is_similarity: bool = True,\n confidence_calculator: Callable = dummy_confidences,\n step_size: int = 500,\n gamma: float = 0.9,\n ):\n \"\"\"This is a class, that represents embeddings fine-tuning logic,\n designed in the way to be use PytorchLightning Trainer.\n\n :param model: embedding model itself\n :param items_storages: items storage related to a given iteration, as a datasetdict with train and test keys\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :param loss_func: loss object for a ranking task\n :param fine_tuning_params: hyper params of fine-tuning task\n :param tracker: 
experiment management object\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default_params only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n \"\"\"\n if not isinstance(model, EmbeddingsModelInterface):\n raise TypeError(\n \"model must be an instance of EmbeddingsModelInterface\"\n )\n\n if not isinstance(items_storages, DatasetDict):\n raise TypeError(\"items_storages must be a DatasetDict\")\n\n if not isinstance(query_retriever, QueryRetriever):\n raise TypeError(\n \"query_retriever must be an instance of QueryRetriever\"\n )\n\n if not isinstance(loss_func, RankingLossInterface):\n raise TypeError(\n \"loss_func must be an instance of RankingLossInterface\"\n )\n\n if not isinstance(fine_tuning_params, FineTuningParams):\n raise TypeError(\n \"fine_tuning_params must be an instance of FineTuningParams\"\n )\n\n if not isinstance(tracker, ExperimentsManager):\n raise TypeError(\n \"tracker must be an instance of ExperimentsManager\"\n )\n\n if not isinstance(fine_tuning_params, FineTuningParams):\n raise TypeError(\n \"fine_tuning_params must be an instance of FineTuningParams\"\n )\n\n super(EmbeddingsFineTuner, self).__init__()\n self.features_extractor = FeaturesExtractor(\n model,\n ranker,\n is_similarity,\n fine_tuning_params.not_irrelevant_only,\n fine_tuning_params.negative_downsampling,\n fine_tuning_params.min_abs_difference_threshold,\n fine_tuning_params.max_abs_difference_threshold,\n confidence_calculator,\n )\n self.items_storages = items_storages\n self.query_retriever = query_retriever\n\n if not metric_calculators:\n logger.debug(\n \"metric_calculators list is empty - DistanceShift metric will be used by default.\"\n )\n self.calculators = (\n metric_calculators\n if metric_calculators is not None\n else [DistanceShift()]\n )\n\n self.loss_func = loss_func\n self.loss_func.set_margin(fine_tuning_params.margin)\n self.fine_tuning_params = fine_tuning_params\n self.tracker = tracker\n self.step_size = step_size\n self.gamma = gamma\n self._validation_metrics = defaultdict(list)\n\n # Fix layers\n self.features_extractor.model.fix_item_model(\n fine_tuning_params.num_fixed_layers\n )\n self.features_extractor.model.fix_query_model(\n fine_tuning_params.num_fixed_layers\n )\n\n self.automatic_optimization = False\n\n def preprocess_sessions(self, clickstream_dataset: DatasetDict):\n for key in clickstream_dataset.keys():\n item_storage = self.items_storages[key]\n logger.info(\n f\"Calculate ranks for {key} not irrelevant clickstream sessions\"\n )\n for session in clickstream_dataset[key].not_irrelevant:\n unique_values = set(session.ranks.values())\n if len(unique_values) == 0 or None in unique_values:\n session.ranks = self.features_extractor.calculate_ranks(\n session, item_storage, self.query_retriever\n )\n\n logger.info(\n f\"Calculate ranks for {key} irrelevant clickstream sessions\"\n )\n for session in clickstream_dataset[key].irrelevant:\n unique_values = set(session.ranks.values())\n if len(unique_values) == 0 or None in unique_values:\n session.ranks = self.features_extractor.calculate_ranks(\n session, item_storage, self.query_retriever\n )\n\n # Standart 
LightningModule methods to be overrided to be used in PytorchLightning Trainer\n # 1. Configure optimizers and schedulers\n def configure_optimizers(\n self,\n ) -> Tuple[List[Optimizer], List[LRScheduler]]:\n if not (isinstance(self.step_size, int) and self.step_size > 0):\n raise ValueError(\"step_size must be a positive integer\")\n\n if not (isinstance(self.gamma, float) and 0 < self.gamma < 1):\n raise ValueError(\"gamma must be a float in the range (0, 1)\")\n\n items_optimizer: SGD = SGD(\n self.features_extractor.model.get_items_model_params(),\n lr=self.fine_tuning_params.items_lr,\n weight_decay=self.fine_tuning_params.items_weight_decay,\n )\n items_scheduler: StepLR = StepLR(\n items_optimizer, step_size=self.step_size, gamma=self.gamma\n )\n\n if self.features_extractor.model.same_query_and_items:\n return [items_optimizer], [items_scheduler]\n\n query_optimizer: SGD = SGD(\n self.features_extractor.model.get_query_model_params(),\n lr=self.fine_tuning_params.query_lr,\n weight_decay=self.fine_tuning_params.query_weight_decay,\n )\n query_scheduler: StepLR = torch.optim.lr_scheduler.StepLR(\n query_optimizer, step_size=self.step_size, gamma=self.gamma\n )\n\n return [items_optimizer, query_optimizer], [\n items_scheduler,\n query_scheduler,\n ]\n\n # 2. Training step code with one batch\n def training_step(\n self,\n batch: List[Tuple[ClickstreamSession, ClickstreamSession]],\n batch_idx: int,\n ) -> Union[FloatTensor, Tensor]:\n if not (\n isinstance(batch, (list, tuple))\n and all(\n isinstance(session, tuple) and len(session) == 2\n for session in batch\n )\n ):\n raise ValueError(\n \"batch must be a list or tuple, and each element must be a tuple of two ClickstreamSessions\"\n )\n\n if isinstance(batch, tuple):\n batch = [\n batch,\n ]\n\n # Get current optimizers\n query_optimizer: Optional[Optimizer] = None\n if self.features_extractor.model.same_query_and_items:\n items_optimizer: Optimizer = self.optimizers()\n else:\n items_optimizer, query_optimizer = self.optimizers()\n\n # Reset the gradients of all optimized\n items_optimizer.zero_grad()\n if query_optimizer:\n query_optimizer.zero_grad()\n\n # Calculate features and loss\n # TODO: encapsulate all inference\n features: SessionFeatures = self.features_extractor.forward(\n batch, self.items_storages[\"train\"], self.query_retriever\n )\n loss: FloatTensor = self.loss_func(features)\n # Gradient backward step\n loss.backward()\n\n # Log train loss\n self.tracker.save_metric(MetricValue(\"train_loss\", loss.item()))\n\n # Do a gradient step\n items_optimizer.step()\n if query_optimizer:\n query_optimizer.step()\n\n with torch.no_grad():\n # And calculate metrics\n for calculator in self.calculators:\n for metric in calculator(\n batch,\n self.features_extractor,\n self.items_storages[\"train\"],\n self.query_retriever,\n ):\n self.tracker.save_metric(metric.add_prefix(\"train\"))\n\n return loss\n\n # 3. 
Validation step code with one batch\n @torch.no_grad()\n def validation_step(\n self,\n batch: List[Tuple[ClickstreamSession, ClickstreamSession]],\n batch_idx: int,\n ) -> Union[FloatTensor, Tensor]:\n if not (\n isinstance(batch, (list, tuple))\n and all(\n isinstance(session, tuple) and len(session) == 2\n for session in batch\n )\n ):\n raise ValueError(\n \"batch must be a list or tuple, and each element must be a tuple of two ClickstreamSessions\"\n )\n\n if isinstance(batch, tuple):\n batch = [\n batch,\n ]\n\n # TODO: encapsulate all inference\n features: SessionFeatures = self.features_extractor.forward(\n batch, self.items_storages[\"test\"], self.query_retriever\n )\n loss: FloatTensor = self.loss_func(features)\n\n # Instead of log test / validation metrics immediately\n # We will accumulate them\n self._validation_metrics[\"loss\"].append(loss.item())\n\n for calculator in self.calculators:\n for metric in calculator(\n batch,\n self.features_extractor,\n self.items_storages[\"test\"],\n self.query_retriever,\n ):\n self._validation_metrics[metric.name].append(metric.value)\n\n return loss\n\n # 4. Aggregation of validation results\n def on_validation_epoch_end(self) -> float:\n loss: Optional[float] = None\n # And log only averages at the end of validation epoch\n for name, values in self._validation_metrics.items():\n mean_value = float(np.mean(values))\n if name == \"loss\":\n loss = mean_value\n self.tracker.save_metric(\n MetricValue(name, mean_value).add_prefix(\"test\")\n )\n\n self._validation_metrics = defaultdict(list)\n\n return loss\n\n @staticmethod\n def create(\n model: EmbeddingsModelInterface,\n settings: FineTuningSettings,\n items_storages: DatasetDict,\n query_retriever: QueryRetriever,\n fine_tuning_params: FineTuningParams,\n tracker: ExperimentsManager,\n ):\n \"\"\"Create embedding fine tuner from settings.\n\n :param model: embedding model itself\n :param settings: fine-tuning settings\n :param items_storages: items storage related to a given iteration, as a datasetdict with train and test keys\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :param fine_tuning_params: hyper params of fine-tuning task\n :param tracker: experiment management object\n :return:\n \"\"\"\n return EmbeddingsFineTuner(\n model=model,\n items_storages=items_storages,\n query_retriever=query_retriever,\n loss_func=settings.loss_func,\n fine_tuning_params=fine_tuning_params,\n tracker=tracker,\n metric_calculators=settings.metric_calculators,\n ranker=settings.ranker,\n is_similarity=settings.is_similarity,\n confidence_calculator=settings.confidence_calculator,\n step_size=settings.step_size,\n gamma=settings.gamma,\n )" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int = 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider 
to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n 
wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model 
from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, 
path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = 
get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = 
None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == 
\"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningParams", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_params.py", "snippet": "class FineTuningParams(BaseModel):\n \"\"\"Params of fine-tuning procedure\n\n :param num_fixed_layers: number of fixed embeddings layers\n :param query_lr: learning rate of query model optimizer\n :param items_lr: learning rate of items model optimizer\n :param query_weight_decay: weight decay of query model optimizer\n :param items_weight_decay: weight decay of items model optimizer\n :param margin: margin from MarginRankingLoss\n :param not_irrelevant_only: use only not irrelevant sessions\n :param negative_downsampling: ratio of negative samples to be used\n :param min_abs_difference_threshold: filter out soft pairs abs(neg_dist - pos_dist) < small value (default: 0.0)\n :param max_abs_difference_threshold: filter out hard pairs abs(neg_dist - pos_dist) > huge value (default: 1.0)\n :param examples_order: order of passing examples to a trainer (default: None)\n \"\"\"\n\n num_fixed_layers: int\n query_lr: float\n items_lr: float\n query_weight_decay: float\n items_weight_decay: float\n margin: float\n not_irrelevant_only: bool\n negative_downsampling: float\n min_abs_difference_threshold: float = 0.0\n max_abs_difference_threshold: float = 1.0\n examples_order: List[ExamplesType] = [ExamplesType.all_examples]\n\n class Config:\n arbitrary_types_allowed = True\n\n @validator(\"examples_order\", pre=True, always=True)\n def validate_examples_order(cls, value):\n if isinstance(value, str):\n value = list(map(int, value.split(\",\")))\n elif isinstance(value, tuple):\n value = list(value)\n return [ExamplesType(v) for v in value]\n\n @validator(\"items_lr\", \"query_lr\", pre=True, always=True)\n def validate_positive_float(cls, value):\n if not (isinstance(value, float) and value > 0):\n raise ValueError(f\"{value} must be a positive float\")\n return value\n\n @validator(\n \"items_weight_decay\", \"query_weight_decay\", pre=True, always=True\n )\n def validate_non_negative_float(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"margin\", pre=True, always=True)\n def validate_non_negative_float_margin(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"num_fixed_layers\", pre=True, always=True)\n def validate_non_negative_int(cls, value):\n if not (isinstance(value, int) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative integer\")\n return value\n\n @root_validator(skip_on_failure=True)\n def validate_example_order(cls, 
values):\n examples_order = values.get(\"examples_order\")\n if examples_order:\n if isinstance(examples_order, str):\n examples_order = list(map(int, examples_order.split(\",\")))\n elif isinstance(examples_order, tuple):\n examples_order = list(examples_order)\n values[\"examples_order\"] = [\n ExamplesType(v) for v in examples_order\n ]\n return values\n\n @property\n def id(self) -> str:\n # Convert the value to bytes (assuming it's a string)\n value_bytes: bytes = str(self).encode(\"utf-8\")\n\n # Create a hash object\n hash_object = hashlib.sha256()\n\n # Update the hash object with the value\n hash_object.update(value_bytes)\n\n # Get the hexadecimal representation of the hash\n unique_id: str = hash_object.hexdigest()\n\n return unique_id\n\n def __str__(self) -> str:\n vals: List[str] = []\n for key, value in sorted(dict(self).items()):\n value = (\n \",\".join(map(str, value)) if isinstance(value, list) else value\n )\n vals.append(f\"{key}: {value}\")\n\n return \" / \".join(vals)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" } ]
import logging
import torch
from typing import Optional
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from torch.utils.data import DataLoader
from embedding_studio.embeddings.data.clickstream.query_retriever import (
    QueryRetriever,
)
from embedding_studio.embeddings.data.ranking_data import RankingData
from embedding_studio.embeddings.models.interface import (
    EmbeddingsModelInterface,
)
from embedding_studio.embeddings.training.embeddings_finetuner import (
    EmbeddingsFineTuner,
)
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_params import (
    FineTuningParams,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
    FineTuningSettings,
)
11,796
logger = logging.getLogger(__name__)


class CustomDataCollator:
    def __call__(self, batch):
        return batch


def fine_tune_embedding_model_one_param(
    initial_model: EmbeddingsModelInterface,
    settings: FineTuningSettings,
    ranking_data: RankingData,
logger = logging.getLogger(__name__)


class CustomDataCollator:
    def __call__(self, batch):
        return batch


def fine_tune_embedding_model_one_param(
    initial_model: EmbeddingsModelInterface,
    settings: FineTuningSettings,
    ranking_data: RankingData,
query_retriever: QueryRetriever,
0
2023-10-31 00:33:13+00:00
16k
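The FineTuningParams.id property in the context above derives a deterministic run name by sha256-hashing the sorted "key: value / ..." string form of the hyperparameters, so identical parameter sets always map to the same MLflow run name. A short sketch of that idea, with hypothetical parameter values:

import hashlib

# Hypothetical "key: value / ..." string, in the sorted form FineTuningParams.__str__ produces.
params_repr = "items_lr: 0.001 / margin: 0.2 / num_fixed_layers: 2 / query_lr: 0.0005"

# Same hashing scheme as the id property: sha256 over the UTF-8 encoded string form.
run_id = hashlib.sha256(params_repr.encode("utf-8")).hexdigest()
print(run_id)  # identical params -> identical 64-character hex id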
masked-spacetime-hashing/msth
dataparser.py
[ { "identifier": "camera_utils", "path": "nerfstudio/cameras/camera_utils.py", "snippet": "_EPS = np.finfo(float).eps * 4.0\n M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]\n K = np.array(\n [\n [m00 - m11 - m22, 0.0, 0.0, 0.0],\n [m01 + m10, m11 - m00 - m22, 0.0, 0.0],\n [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],\n [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],\n ]\n )\ndef unit_vector(data: ArrayLike, axis: Optional[int] = None) -> np.ndarray:\ndef quaternion_from_matrix(matrix: ArrayLike, isprecise: bool = False) -> np.ndarray:\ndef quaternion_slerp(\n quat0: ArrayLike, quat1: ArrayLike, fraction: float, spin: int = 0, shortestpath: bool = True\n) -> np.ndarray:\ndef quaternion_matrix(quaternion: ArrayLike) -> np.ndarray:\ndef get_interpolated_poses(pose_a: ArrayLike, pose_b: ArrayLike, steps: int = 10) -> List[float]:\ndef get_interpolated_k(k_a, k_b, steps: int = 10) -> TensorType[3, 4]:\ndef get_interpolated_poses_many(\n poses: TensorType[\"num_poses\", 3, 4],\n Ks: TensorType[\"num_poses\", 3, 3],\n steps_per_transition=10,\n) -> Tuple[TensorType[\"num_poses\", 3, 4], TensorType[\"num_poses\", 3, 3]]:\ndef normalize(x: torch.Tensor) -> TensorType[...]:\ndef normalize_with_norm(x: torch.Tensor, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:\ndef viewmatrix(lookat: torch.Tensor, up: torch.Tensor, pos: torch.Tensor) -> TensorType[...]:\ndef get_distortion_params(\n k1: float = 0.0,\n k2: float = 0.0,\n k3: float = 0.0,\n k4: float = 0.0,\n p1: float = 0.0,\n p2: float = 0.0,\n) -> TensorType[...]:\ndef _compute_residual_and_jacobian(\n x: torch.Tensor,\n y: torch.Tensor,\n xd: torch.Tensor,\n yd: torch.Tensor,\n distortion_params: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:\ndef radial_and_tangential_undistort(\n coords: torch.Tensor,\n distortion_params: torch.Tensor,\n eps: float = 1e-3,\n max_iterations: int = 10,\n) -> torch.Tensor:\ndef rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:\ndef auto_orient_and_center_poses(\n poses: TensorType[\"num_poses\":..., 4, 4], method: Literal[\"pca\", \"up\", \"none\"] = \"up\", center_poses: bool = True\n) -> Tuple[TensorType[\"num_poses\":..., 3, 4], TensorType[4, 4]]:" }, { "identifier": "CAMERA_MODEL_TO_TYPE", "path": "nerfstudio/cameras/cameras.py", "snippet": "CAMERA_MODEL_TO_TYPE = {\n \"SIMPLE_PINHOLE\": CameraType.PERSPECTIVE,\n \"PINHOLE\": CameraType.PERSPECTIVE,\n \"SIMPLE_RADIAL\": CameraType.PERSPECTIVE,\n \"RADIAL\": CameraType.PERSPECTIVE,\n \"OPENCV\": CameraType.PERSPECTIVE,\n \"OPENCV_FISHEYE\": CameraType.FISHEYE,\n \"EQUIRECTANGULAR\": CameraType.EQUIRECTANGULAR,\n}" }, { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. 
Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\", 1]]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n ) -> None:\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n\n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n self._use_nerfacc = strtobool(os.environ.get(\"INTERSECT_WITH_NERFACC\", \"TRUE\"))\n\n def _init_get_fc_xy(self, fc_xy: Union[float, torch.Tensor], name: str) -> torch.Tensor:\n \"\"\"\n Parses the input focal length / principle point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principle point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.as_tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.as_tensor((c_x_y * 2)).to(torch.int64).to(self.device)\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times: Union[None, torch.Tensor]) -> Union[None, torch.Tensor]:\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self) -> TORCH_DEVICE:\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self) -> bool:\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n aabb_box: Optional[SceneBox] = None,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regardless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n aabb_box: if not None will calculate nears and fars of the ray according to aabb box intesection\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n 
[index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n )\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n if aabb_box:\n with torch.no_grad():\n tensor_aabb = Parameter(aabb_box.aabb.flatten(), requires_grad=False)\n\n rays_o = raybundle.origins.contiguous()\n rays_d = raybundle.directions.contiguous()\n\n tensor_aabb = tensor_aabb.to(rays_o.device)\n shape = rays_o.shape\n\n rays_o = rays_o.reshape((-1, 3))\n rays_d = rays_d.reshape((-1, 3))\n\n t_min, t_max = nerfstudio.utils.math.intersect_aabb(rays_o, rays_d, tensor_aabb)\n\n t_min = t_min.reshape([shape[0], shape[1], 1])\n t_max = t_max.reshape([shape[0], shape[1], 1])\n\n raybundle.nears = t_min\n raybundle.fars = t_max\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: 
TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask).float()\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n directions_stack, directions_norm = camera_utils.normalize_with_norm(directions_stack, -1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) 
# (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n times=times,\n metadata={\"directions_norm\": directions_norm[0].detach()},\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self, scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int]\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * scaling_factor).to(torch.int64)" }, { "identifier": "CameraType", "path": "nerfstudio/cameras/cameras.py", "snippet": "class CameraType(Enum):\n \"\"\"Supported camera types.\"\"\"\n\n PERSPECTIVE = 
auto()\n FISHEYE = auto()\n EQUIRECTANGULAR = auto()" }, { "identifier": "to_immutable_dict", "path": "nerfstudio/configs/config_utils.py", "snippet": "def to_immutable_dict(d: Dict[str, Any]):\n \"\"\"Method to convert mutable dict to default factory dict\n\n Args:\n d: dictionary to convert into default factory dict for dataclass\n \"\"\"\n return field(default_factory=lambda: dict(d))" }, { "identifier": "DataParser", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataParser:\n \"\"\"A dataset.\n\n Args:\n config: datasetparser config containing all information needed to instantiate dataset\n \"\"\"\n\n config: DataParserConfig\n\n def __init__(self, config: DataParserConfig):\n super().__init__()\n self.config = config\n\n @abstractmethod\n def _generate_dataparser_outputs(self, split: str = \"train\") -> DataparserOutputs:\n \"\"\"Abstract method that returns the dataparser outputs for the given split.\n\n Args:\n split: Which dataset split to generate (train/test).\n\n Returns:\n DataparserOutputs containing data for the specified dataset and split\n \"\"\"\n\n def get_dataparser_outputs(self, split: str = \"train\") -> DataparserOutputs:\n \"\"\"Returns the dataparser outputs for the given split.\n\n Args:\n split: Which dataset split to generate (train/test).\n\n Returns:\n DataparserOutputs containing data for the specified dataset and split\n \"\"\"\n dataparser_outputs = self._generate_dataparser_outputs(split)\n return dataparser_outputs" }, { "identifier": "DataParserConfig", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataParserConfig(cfg.InstantiateConfig):\n \"\"\"Basic dataset config\"\"\"\n\n _target: Type = field(default_factory=lambda: DataParser)\n \"\"\"_target: target class to instantiate\"\"\"\n data: Path = Path()\n \"\"\"Directory specifying location of data.\"\"\"" }, { "identifier": "DataparserOutputs", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataparserOutputs:\n \"\"\"Dataparser outputs for the which will be used by the DataManager\n for creating RayBundle and RayGT objects.\"\"\"\n\n image_filenames: List[Path]\n \"\"\"Filenames for the images.\"\"\"\n cameras: Cameras\n \"\"\"Camera object storing collection of camera information in dataset.\"\"\"\n alpha_color: Optional[TensorType[3]] = None\n \"\"\"Color of dataset background.\"\"\"\n scene_box: SceneBox = SceneBox()\n \"\"\"Scene box of dataset. Used to bound the scene or provide the scene scale depending on model.\"\"\"\n mask_filenames: Optional[List[Path]] = None\n \"\"\"Filenames for any masks that are required\"\"\"\n metadata: Dict[str, Any] = to_immutable_dict({})\n \"\"\"Dictionary of any metadata that be required for the given experiment.\n Will be processed by the InputDataset to create any additional tensors that may be required.\n \"\"\"\n dataparser_transform: TensorType[3, 4] = torch.eye(4)[:3, :]\n \"\"\"Transform applied by the dataparser.\"\"\"\n dataparser_scale: float = 1.0\n \"\"\"Scale applied by the dataparser.\"\"\"\n\n def as_dict(self) -> dict:\n \"\"\"Returns the dataclass as a dictionary.\"\"\"\n return vars(self)\n\n def save_dataparser_transform(self, path: Path):\n \"\"\"Save dataparser transform to json file. 
Some dataparsers will apply a transform to the poses,\n this method allows the transform to be saved so that it can be used in other applications.\n\n Args:\n path: path to save transform to\n \"\"\"\n data = {\n \"transform\": self.dataparser_transform.tolist(),\n \"scale\": float(self.dataparser_scale),\n }\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"w\", encoding=\"UTF-8\") as file:\n json.dump(data, file, indent=4)" }, { "identifier": "SceneBox", "path": "nerfstudio/data/scene_box.py", "snippet": "class SceneBox:\n \"\"\"Data to represent the scene box.\"\"\"\n\n aabb: TensorType[2, 3] = None\n \"\"\"aabb: axis-aligned bounding box.\n aabb[0] is the minimum (x,y,z) point.\n aabb[1] is the maximum (x,y,z) point.\"\"\"\n\n def get_diagonal_length(self):\n \"\"\"Returns the longest diagonal length.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n length = torch.sqrt((diff**2).sum() + 1e-20)\n return length\n\n def get_center(self):\n \"\"\"Returns the center of the box.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n return self.aabb[0] + diff / 2.0\n\n def get_centered_and_scaled_scene_box(self, scale_factor: Union[float, torch.Tensor] = 1.0):\n \"\"\"Returns a new box that has been shifted and rescaled to be centered\n about the origin.\n\n Args:\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n return SceneBox(aabb=(self.aabb - self.get_center()) * scale_factor)\n\n @staticmethod\n def get_normalized_positions(positions: TensorType[..., 3], aabb: TensorType[2, 3]):\n \"\"\"Return normalized positions in range [0, 1] based on the aabb axis-aligned bounding box.\n\n Args:\n positions: the xyz positions\n aabb: the axis-aligned bounding box\n \"\"\"\n aabb_lengths = aabb[1] - aabb[0]\n normalized_positions = (positions - aabb[0]) / aabb_lengths\n return normalized_positions\n\n def to_json(self) -> Dict:\n \"\"\"Returns a json object from the Python object.\"\"\"\n return {\"type\": \"aabb\", \"min_point\": self.aabb[0].tolist(), \"max_point\": self.aabb[1].tolist()}\n\n @staticmethod\n def from_json(json_: Dict) -> \"SceneBox\":\n \"\"\"Returns the an instance of SceneBox from a json dictionary.\n\n Args:\n json_: the json dictionary containing scene box information\n \"\"\"\n assert json_[\"type\"] == \"aabb\"\n aabb = torch.tensor([json_[0], json_[1]])\n return SceneBox(aabb=aabb)\n\n @staticmethod\n def from_camera_poses(poses: TensorType[..., 3, 4], scale_factor: float) -> \"SceneBox\":\n \"\"\"Returns the instance of SceneBox that fully envelopes a set of poses\n\n Args:\n poses: tensor of camera pose matrices\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n xyzs = poses[..., :3, -1]\n aabb = torch.stack([torch.min(xyzs, dim=0)[0], torch.max(xyzs, dim=0)[0]])\n return SceneBox(aabb=aabb * scale_factor)" }, { "identifier": "load_from_json", "path": "nerfstudio/utils/io.py", "snippet": "def load_from_json(filename: Path):\n \"\"\"Load a dictionary from a JSON filename.\n\n Args:\n filename: The filename to load from.\n \"\"\"\n assert filename.suffix == \".json\"\n with open(filename, encoding=\"UTF-8\") as file:\n return json.load(file)" } ]
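
(Illustration only: the Cameras snippet in the context above documents how per-camera intrinsics are broadcast and how generate_rays standardizes camera_indices and coords; its "case 2" is an int camera index plus explicit pixel coords. The sketch below exercises that case, assuming the nerfstudio package shown in this context is importable; the pose and intrinsic values are invented.)

# Illustration only (not part of the record): one perspective camera and a
# single-pixel ray query, using the Cameras API documented in the snippet above.
import torch
from nerfstudio.cameras.cameras import Cameras, CameraType

c2w = torch.eye(4)[:3, :].unsqueeze(0)            # (1, 3, 4) identity pose
cameras = Cameras(
    camera_to_worlds=c2w,
    fx=500.0, fy=500.0,                           # floats are broadcast to all cameras
    cx=320.0, cy=240.0,
    width=640, height=480,
    camera_type=CameraType.PERSPECTIVE,
)

coords = torch.tensor([[240.0, 320.0]])           # (y, x) pixel coordinates, shape (1, 2)
rays = cameras.generate_rays(camera_indices=0, coords=coords)
print(rays.origins.shape, rays.directions.shape)  # torch.Size([1, 3]) torch.Size([1, 3])
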
import json
import math
import os
import cv2
import numpy as np
import torch
from copy import deepcopy
from dataclasses import dataclass, field
from pathlib import Path, PurePath
from typing import List, Optional, Type
from typing import *
from PIL import Image
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import CAMERA_MODEL_TO_TYPE, Cameras, CameraType
from nerfstudio.configs.config_utils import to_immutable_dict
from nerfstudio.data.dataparsers.base_dataparser import (
    DataParser,
    DataParserConfig,
    DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.io import load_from_json
from torchtyping import TensorType
12904
from __future__ import annotations

CONSOLE = Console(width=120)
MAX_AUTO_RESOLUTION = 1600


@dataclass
class VideoDataParserOutputs:
    data_dir: Path
    video_filenames: List[Path]
    start_frame: int
    num_frames: int
    """Dataparser outputs for the which will be used by the DataManager for creating RayBundle and RayGT objects."""
    """Filenames for the images."""
from __future__ import annotations

CONSOLE = Console(width=120)
MAX_AUTO_RESOLUTION = 1600


@dataclass
class VideoDataParserOutputs:
    data_dir: Path
    video_filenames: List[Path]
    start_frame: int
    num_frames: int
    """Dataparser outputs for the which will be used by the DataManager for creating RayBundle and RayGT objects."""
    """Filenames for the images."""
cameras: Cameras
2
2023-10-26 04:39:15+00:00
16k
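
(To make the record above concrete: cropped_code ends right after the dataclass docstrings, next_line is "cameras: Cameras", and gold_snippet_index 2 points at the Cameras snippet in the context. Below is a hypothetical, self-contained reconstruction of that completion boundary, with docstrings omitted and a stand-in for Cameras so the sketch runs without the repository.)

# Hypothetical reconstruction of the completion boundary for the record above.
# "Cameras" is replaced by a stand-in type so this sketch is self-contained; in
# the actual file it is nerfstudio.cameras.cameras.Cameras (gold snippet 2).
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List

Cameras = Any  # stand-in for the real Cameras class


@dataclass
class VideoDataParserOutputs:
    data_dir: Path
    video_filenames: List[Path]
    start_frame: int
    num_frames: int
    cameras: Cameras  # <- the gold next_line this record asks a model to predict
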
Trustworthy-AI-Group/TransferAttack
transferattack/model_related/ghost.py
[ { "identifier": "Attack", "path": "transferattack/attack.py", "snippet": "class Attack(object):\n \"\"\"\n Base class for all attacks.\n \"\"\"\n def __init__(self, attack, model_name, epsilon, targeted, random_start, norm, loss, device=None):\n \"\"\"\n Initialize the hyperparameters\n\n Arguments:\n attack (str): the name of attack.\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \"\"\"\n if norm not in ['l2', 'linfty']:\n raise Exception(\"Unsupported norm {}\".format(norm))\n self.attack = attack\n self.model = self.load_model(model_name)\n self.epsilon = epsilon\n self.targeted = targeted\n self.random_start = random_start\n self.norm = norm\n if isinstance(self.model, EnsembleModel):\n self.device = self.model.device\n else:\n self.device = next(self.model.parameters()).device if device is None else device\n self.loss = self.loss_function(loss)\n\n def load_model(self, model_name):\n \"\"\"\n The model Loading stage, which should be overridden when surrogate model is customized (e.g., DSM, SETR, etc.)\n Prioritize the model in torchvision.models, then timm.models\n\n Arguments:\n model_name (str/list): the name of surrogate model in model_list in utils.py\n\n Returns:\n model (torch.nn.Module): the surrogate model wrapped by wrap_model in utils.py\n \"\"\"\n def load_single_model(model_name):\n if model_name in models.__dict__.keys():\n print('=> Loading model {} from torchvision.models'.format(model_name))\n model = models.__dict__[model_name](weights=\"DEFAULT\")\n elif model_name in timm.list_models():\n print('=> Loading model {} from timm.models'.format(model_name))\n model = timm.create_model(model_name, pretrained=True)\n else:\n raise ValueError('Model {} not supported'.format(model_name))\n return wrap_model(model.eval().cuda())\n\n if isinstance(model_name, list):\n return EnsembleModel([load_single_model(name) for name in model_name])\n else:\n return load_single_model(model_name)\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The general attack procedure\n\n Arguments:\n data (N, C, H, W): tensor for input images\n labels (N,): tensor for ground-truth labels if untargetd\n labels (2,N): tensor for [ground-truth, targeted labels] if targeted\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum = 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()\n\n def get_logits(self, x, **kwargs):\n \"\"\"\n The inference stage, which should be overridden when the attack need to change the models (e.g., ensemble-model attack, ghost, etc.) or the input (e.g. 
DIM, SIM, etc.)\n \"\"\"\n return self.model(x)\n\n def get_loss(self, logits, label):\n \"\"\"\n The loss calculation, which should be overrideen when the attack change the loss calculation (e.g., ATA, etc.)\n \"\"\"\n # Calculate the loss\n return -self.loss(logits, label) if self.targeted else self.loss(logits, label)\n\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n The gradient calculation, which should be overridden when the attack need to tune the gradient (e.g., TIM, variance tuning, enhanced momentum, etc.)\n \"\"\"\n return torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n\n def get_momentum(self, grad, momentum, **kwargs):\n \"\"\"\n The momentum calculation\n \"\"\"\n return momentum * self.decay + grad / (grad.abs().mean(dim=(1,2,3), keepdim=True))\n\n def init_delta(self, data, **kwargs):\n delta = torch.zeros_like(data).to(self.device)\n if self.random_start:\n if self.norm == 'linfty':\n delta.uniform_(-self.epsilon, self.epsilon)\n else:\n delta.normal_(-self.epsilon, self.epsilon)\n d_flat = delta.view(delta.size(0), -1)\n n = d_flat.norm(p=2, dim=10).view(delta.size(0), 1, 1, 1)\n r = torch.zeros_like(data).uniform_(0,1).to(self.device)\n delta *= r/n*self.epsilon\n delta = clamp(delta, img_min-data, img_max-data)\n delta.requires_grad = True\n return delta\n\n def update_delta(self, delta, data, grad, alpha, **kwargs):\n if self.norm == 'linfty':\n delta = torch.clamp(delta + alpha * grad.sign(), -self.epsilon, self.epsilon)\n else:\n grad_norm = torch.norm(grad.view(grad.size(0), -1), dim=1).view(-1, 1, 1, 1)\n scaled_grad = grad / (grad_norm + 1e-20)\n delta = (delta + scaled_grad * alpha).view(delta.size(0), -1).renorm(p=2, dim=0, maxnorm=self.epsilon).view_as(delta)\n delta = clamp(delta, img_min-data, img_max-data)\n return delta\n\n def loss_function(self, loss):\n \"\"\"\n Get the loss function\n \"\"\"\n if loss == 'crossentropy':\n return nn.CrossEntropyLoss()\n else:\n raise Exception(\"Unsupported loss {}\".format(loss))\n\n def transform(self, data, **kwargs):\n return data\n\n def __call__(self, *input, **kwargs):\n self.model.eval()\n return self.forward(*input, **kwargs)" }, { "identifier": "ghost_resnet101", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet101_Weights.IMAGENET1K_V1))\ndef ghost_resnet101(*, ghost_random_range=0.16, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet101_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet101_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. 
Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet101_Weights\n :members:\n \"\"\"\n weights = ResNet101_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 4, 23, 3], weights, progress, **kwargs)" }, { "identifier": "ghost_resnet152", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet152_Weights.IMAGENET1K_V1))\ndef ghost_resnet152(*, ghost_random_range=0.12, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet152_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet152_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet152_Weights\n :members:\n \"\"\"\n weights = ResNet152_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 8, 36, 3], weights, progress, **kwargs)" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "VMIFGSM", "path": "transferattack/gradient/vmifgsm.py", "snippet": "class VMIFGSM(Attack):\n \"\"\"\n VMI-FGSM Attack\n 'Enhancing the transferability of adversarial attacks through variance tuning (CVPR 2021)'(https://arxiv.org/abs/2103.15571)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n beta (float): the relative value for the neighborhood.\n num_neighbor (int): the number of samples for estimating the gradient variance.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1.\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1., targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='VMI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.radius = beta * epsilon\n self.epoch = epoch\n self.decay = decay\n self.num_neighbor = num_neighbor\n\n def get_variance(self, data, delta, label, cur_grad, momentum, **kwargs):\n \"\"\"\n Calculate the gradient variance \n \"\"\"\n grad = 0\n for _ in range(self.num_neighbor):\n # Obtain the output\n # This is inconsistent for transform!\n logits = self.get_logits(self.transform(data+delta+torch.zeros_like(delta).uniform_(-self.radius, self.radius).to(self.device), momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad += self.get_grad(loss, delta)\n\n return grad / self.num_neighbor - cur_grad\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The attack procedure for VMI-FGSM\n\n Arguments:\n data: (N, C, H, W) tensor for input images\n labels: (N,) tensor for ground-truth labels if untargetd, otherwise targeted labels\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum, variance = 0, 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad+variance, momentum)\n\n # Calculate the variance\n variance = self.get_variance(data, delta, label, grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! 
The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" } ]
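All of the attack snippets collected in the context above share the same momentum-driven core that MI-FGSM introduces and that NI-FGSM, VMI-FGSM, DIM, TIM, SIM and Admix extend through their transform/get_grad/get_loss hooks. The following is only a minimal self-contained sketch of that core loop, not the transferattack implementation; the model/x/y names, the NCHW layout and the final [0, 1] clamp are illustrative assumptions.

import torch
import torch.nn.functional as F

def mifgsm_core_sketch(model, x, y, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1.0):
    """Sketch of the momentum update the snippets above build on (untargeted, L_inf)."""
    delta = torch.zeros_like(x, requires_grad=True)   # perturbation, starts at zero
    momentum = torch.zeros_like(x)
    for _ in range(epoch):
        loss = F.cross_entropy(model(x + delta), y)
        grad = torch.autograd.grad(loss, delta)[0]
        # accumulate the L1-normalized gradient with decay (MI-FGSM momentum); assumes NCHW batches
        momentum = decay * momentum + grad / grad.abs().mean(dim=(1, 2, 3), keepdim=True)
        # sign step, then project delta back into the epsilon ball
        delta = (delta + alpha * momentum.sign()).clamp(-epsilon, epsilon)
        delta = delta.detach().requires_grad_(True)
    return (x + delta).clamp(0, 1).detach()

NI-FGSM only changes the point at which the gradient is taken (x + delta + alpha*decay*momentum), and VMI-FGSM additionally subtracts a neighborhood-averaged gradient, exactly as the get_variance snippet shows.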
from ..utils import * from ..attack import Attack from .ghost_networks.resnet import ghost_resnet101, ghost_resnet152 from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..gradient.vmifgsm import VMIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix from torch import Tensor from ..utils import * from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix
11,180
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
class GhostNetwork_NIFGSM(NIFGSM):
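The ground-truth next_line above opens GhostNetwork_NIFGSM. Judging purely from the two classes in the snippet (GhostNetwork_MIFGSM and GhostNetwork_IFGSM), this class presumably repeats the same pattern: keep the ghost hyperparameters and override load_model with the ghost backbones. The body below is a hedged guess at that pattern, reusing support_models and wrap_model from the snippet, and may differ from the actual repository code.

class GhostNetwork_NIFGSM(NIFGSM):
    """Ghost Network attack driven by the NI-FGSM update (sketch only)."""

    def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob          # kept for interface parity; not used directly
        self.ghost_random_range = ghost_random_range    # kept for interface parity; not used directly
        super().__init__(model_name, *args, **kwargs)

    def load_model(self, model_name):
        # ghost_keep_prob / ghost_random_range are baked into the ghost ResNet defaults
        if model_name in support_models.keys():
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model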
11
2023-10-31 03:43:26+00:00
16k
chenruduan/OAReactDiff
demo.py
[ { "identifier": "LEFTNet", "path": "oa_reactdiff/model/leftnet.py", "snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)\n cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)\n num_layers (int, optional): Number of building blocks. (default: :obj:`4`)\n hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)\n num_radial (int, optional): Number of radial basis functions. (default: :obj:`96`)\n y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)\n y_std (float, optional): Standard deviation of the labels of training data. (default: :obj:`1`)\n\n \"\"\"\n\n def __init__(\n self,\n pos_require_grad=False,\n cutoff=10.0,\n num_layers=4,\n hidden_channels=128,\n num_radial=96,\n in_hidden_channels: int = 8,\n reflect_equiv: bool = True,\n legacy: bool = True,\n update: bool = True,\n pos_grad: bool = False,\n single_layer_output: bool = True,\n for_conf: bool = False,\n ff: bool = False,\n object_aware: bool = True,\n **kwargs,\n ):\n super(LEFTNet, self).__init__()\n self.num_layers = num_layers\n self.hidden_channels = hidden_channels\n self.cutoff = cutoff\n self.pos_require_grad = pos_require_grad\n self.reflect_equiv = reflect_equiv\n self.legacy = legacy\n self.update = update\n self.pos_grad = pos_grad\n self.for_conf = for_conf\n self.ff = ff\n self.object_aware = object_aware\n\n self.embedding = nn.Linear(in_hidden_channels, hidden_channels)\n self.embedding_out = nn.Linear(hidden_channels, in_hidden_channels)\n self.radial_emb = RBFEmb(num_radial, self.cutoff)\n self.neighbor_emb = NeighborEmb(hidden_channels, in_hidden_channels)\n self.s2v = CFConvS2V(hidden_channels)\n\n self.radial_lin = nn.Sequential(\n nn.Linear(num_radial, hidden_channels),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels, hidden_channels),\n )\n\n self.lin3 = nn.Sequential(\n nn.Linear(3, hidden_channels // 4),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 4, 1),\n )\n self.pos_expansion = MLP(\n in_dim=3,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n last_layer_no_activation=True,\n bias=False,\n )\n if self.legacy:\n self.distance_embedding = MLP(\n in_dim=num_radial,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n bias=False,\n )\n if self.pos_grad:\n self.dynamic_mlp_modules = nn.Sequential(\n nn.Linear(hidden_channels, hidden_channels // 2),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 2, 3),\n )\n\n self.gcl_layers = nn.ModuleList()\n self.message_layers = nn.ModuleList()\n self.update_layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.gcl_layers.append(\n GCLMessage(hidden_channels, num_radial, legacy=legacy)\n )\n self.message_layers.append(\n EquiMessage(hidden_channels, num_radial, reflect_equiv).jittable()\n )\n self.update_layers.append(EquiUpdate(hidden_channels, reflect_equiv))\n\n self.last_layer = nn.Linear(hidden_channels, 1)\n\n self.inv_sqrt_2 = 1 / math.sqrt(2.0)\n self.out_pos = EquiOutput(\n hidden_channels,\n out_channels=1,\n single_layer_output=single_layer_output,\n )\n\n # for node-wise frame\n self.vec = vector()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.radial_emb.reset_parameters()\n\n def scalarization(self, pos, edge_index):\n i, j = edge_index\n dist = (pos[i] - 
pos[j]).pow(2).sum(dim=-1).sqrt()\n coord_diff = pos[i] - pos[j]\n radial = torch.sum((coord_diff) ** 2, 1).unsqueeze(1)\n coord_cross = torch.cross(pos[i], pos[j])\n norm = torch.sqrt(radial) + EPS\n coord_diff = coord_diff / norm\n cross_norm = (torch.sqrt(torch.sum((coord_cross) ** 2, 1).unsqueeze(1))) + EPS\n coord_cross = coord_cross / cross_norm\n coord_vertical = torch.cross(coord_diff, coord_cross)\n\n return dist, coord_diff, coord_cross, coord_vertical\n\n @staticmethod\n def assemble_nodemask(edge_index: Tensor, pos: Tensor):\n node_mask = torch.zeros(pos.size(0), device=pos.device)\n node_mask[:] = -1\n _i, _j = edge_index\n _ind = 0\n for center in range(pos.size(0)):\n if node_mask[center] > -1:\n continue\n _connected = _j[torch.where(_i == center)]\n _connected = torch.concat(\n [_connected, torch.tensor([center], device=pos.device)]\n )\n node_mask[_connected] = _ind\n _ind += 1\n return node_mask\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ):\n # if self.pos_require_grad:\n # pos.requires_grad_()\n\n if not self.object_aware:\n subgraph_mask = None\n\n i, j = edge_index\n\n # embed z, assuming last column is atom number\n z_emb = self.embedding(h)\n\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n inner_subgraph_mask = torch.zeros(edge_index.size(1), 1, device=dist.device)\n inner_subgraph_mask[torch.where(dist < self.cutoff)[0]] = 1\n\n all_edge_masks = inner_subgraph_mask\n if subgraph_mask is not None:\n all_edge_masks = all_edge_masks * subgraph_mask\n\n edge_index_w_cutoff = edge_index.T[torch.where(all_edge_masks > 0)[0]].T\n node_mask_w_cutoff = self.assemble_nodemask(\n edge_index=edge_index_w_cutoff, pos=pos\n )\n\n pos_frame = pos.clone()\n pos_frame = remove_mean_batch(pos_frame, node_mask_w_cutoff.long())\n\n # bulid edge-wise frame and scalarization vector features for edge update\n dist, coord_diff, coord_cross, coord_vertical = self.scalarization(\n pos_frame, edge_index\n )\n\n dist = dist * all_edge_masks.squeeze(-1)\n coord_diff = coord_diff * all_edge_masks\n coord_cross = coord_cross * all_edge_masks\n coord_vertical = coord_vertical * all_edge_masks\n\n frame = torch.cat(\n (\n coord_diff.unsqueeze(-1),\n coord_cross.unsqueeze(-1),\n coord_vertical.unsqueeze(-1),\n ),\n dim=-1,\n )\n radial_emb = self.radial_emb(dist)\n radial_emb = radial_emb * all_edge_masks\n\n f = self.radial_lin(radial_emb)\n rbounds = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)\n f = rbounds.unsqueeze(-1) * f\n\n # init node features\n s = self.neighbor_emb(h, z_emb, edge_index, f)\n\n NE1 = self.s2v(s, coord_diff.unsqueeze(-1), edge_index, f)\n scalrization1 = torch.sum(NE1[i].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n scalrization2 = torch.sum(NE1[j].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n if self.reflect_equiv:\n scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())\n scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())\n\n scalar3 = (\n self.lin3(torch.permute(scalrization1, (0, 2, 1)))\n + torch.permute(scalrization1, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n scalar4 = (\n self.lin3(torch.permute(scalrization2, (0, 2, 1)))\n + torch.permute(scalrization2, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n edgeweight = torch.cat((scalar3, scalar4), dim=-1) * 
rbounds.unsqueeze(-1)\n edgeweight = torch.cat((edgeweight, f), dim=-1)\n # add distance embedding\n edgeweight = torch.cat((edgeweight, radial_emb), dim=-1)\n\n # bulid node-wise frame for node-update\n a = pos_frame\n if self.legacy:\n b = self.vec(pos_frame, edge_index)\n else:\n # Added by Chenru: for new implementation of constructing node frame.\n eff_edge_ij = torch.where(all_edge_masks.squeeze(-1) == 1)[0]\n eff_edge_index = edge_index[:, eff_edge_ij]\n eff_dist = dist[eff_edge_ij]\n b = nn_vector(eff_dist, eff_edge_index, pos_frame)\n # assert_rot_equiv(nn_vector, dist_pad, edge_index, pos) # for debugging\n\n x1 = (a - b) / ((torch.sqrt(torch.sum((a - b) ** 2, 1).unsqueeze(1))) + EPS)\n y1 = torch.cross(a, b)\n normy = (torch.sqrt(torch.sum(y1**2, 1).unsqueeze(1))) + EPS\n y1 = y1 / normy\n # assert torch.trace(torch.matmul(x1, torch.transpose(y1, 0, 1))) < EPS # for debugging\n\n z1 = torch.cross(x1, y1)\n nodeframe = torch.cat(\n (x1.unsqueeze(-1), y1.unsqueeze(-1), z1.unsqueeze(-1)), dim=-1\n )\n\n pos_prjt = torch.sum(pos_frame.unsqueeze(-1) * nodeframe, dim=1)\n\n vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)\n gradient = torch.zeros(s.size(0), 3, device=s.device)\n for i in range(self.num_layers):\n # Added by Chenru: for letting multiple objects message passing.\n if self.legacy or i == 0:\n s = s + self.pos_expansion(pos_prjt)\n s, edgeweight = self.gcl_layers[i](\n s,\n edge_index,\n edgeweight,\n )\n\n dx, dvec = self.message_layers[i](\n s,\n vec,\n edge_index,\n radial_emb,\n edgeweight,\n coord_diff,\n coord_cross,\n )\n s = s + dx\n vec = vec + dvec\n s = s * self.inv_sqrt_2\n\n if self.update:\n dx, dvec = self.update_layers[i](s, vec, nodeframe)\n s = s + dx\n vec = vec + dvec\n\n if self.pos_grad:\n dynamic_coff = self.dynamic_mlp_modules(s) # (node, 3)\n basis_mix = (\n dynamic_coff[:, :1] * x1\n + dynamic_coff[:, 1:2] * y1\n + dynamic_coff[:, 2:3] * z1\n )\n gradient = gradient + basis_mix / self.num_layers\n\n if self.for_conf:\n return s\n\n _, dpos = self.out_pos(s, vec)\n\n if update_coords_mask is not None:\n dpos = update_coords_mask * dpos\n pos = pos + dpos + gradient\n\n if self.ff:\n return s, dpos\n\n h = self.embedding_out(s)\n if node_mask is not None:\n h = h * node_mask\n edge_attr = None\n return h, pos, edge_attr" }, { "identifier": "generate_full_eij", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def generate_full_eij(n_atom: int):\n r\"\"\"Get fully-connected graphs for n_atoms.\"\"\"\n edge_index = []\n for ii in range(n_atom):\n for jj in range(n_atom):\n if ii != jj:\n edge_index.append([ii, jj])\n return torch.transpose(torch.Tensor(edge_index), 1, 0).long()" }, { "identifier": "get_cut_graph_mask", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def get_cut_graph_mask(edge_index, n_cut):\n r\"\"\"Get mask for a graph cut at n_cut, with ij representing cross-subgraph edgs being 0.\"\"\"\n ind_sum = torch.where(edge_index < n_cut, 1, 0).sum(dim=0)\n subgraph_mask = torch.zeros(edge_index.size(1)).long()\n subgraph_mask[ind_sum == 2] = 1\n subgraph_mask[ind_sum == 0] = 1\n subgraph_mask = subgraph_mask[:, None]\n return subgraph_mask" }, { "identifier": "DDPMModule", "path": "oa_reactdiff/trainer/pl_trainer.py", "snippet": "class DDPMModule(LightningModule):\n def __init__(\n self,\n model_config: Dict,\n optimizer_config: Dict,\n training_config: Dict,\n node_nfs: List[int] = [9] * 3,\n edge_nf: int = 4,\n condition_nf: int = 3,\n fragment_names: List[str] = [\"inorg_node\", \"org_edge\", \"org_node\"],\n 
pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n noise_schedule: str = \"polynomial_2\",\n timesteps: int = 1000,\n precision: float = 1e-5,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n process_type: Optional[str] = None,\n model: nn.Module = None,\n enforce_same_encoding: Optional[List] = None,\n scales: List[float] = [1.0, 1.0, 1.0],\n eval_epochs: int = 20,\n source: Optional[Dict] = None,\n fixed_idx: Optional[List] = None,\n ) -> None:\n super().__init__()\n egnn_dynamics = EGNNDynamics(\n model_config=model_config,\n node_nfs=node_nfs,\n edge_nf=edge_nf,\n condition_nf=condition_nf,\n fragment_names=fragment_names,\n pos_dim=pos_dim,\n update_pocket_coords=update_pocket_coords,\n condition_time=condition_time,\n edge_cutoff=edge_cutoff,\n model=model,\n enforce_same_encoding=enforce_same_encoding,\n source=source,\n )\n\n normalizer = Normalizer(\n norm_values=norm_values,\n norm_biases=norm_biases,\n pos_dim=pos_dim,\n )\n\n gamma_module = PredefinedNoiseSchedule(\n noise_schedule=noise_schedule,\n timesteps=timesteps,\n precision=precision,\n )\n schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)\n\n self.ddpm = EnVariationalDiffusion(\n dynamics=egnn_dynamics,\n schdule=schedule,\n normalizer=normalizer,\n size_histogram=None,\n loss_type=loss_type,\n pos_only=pos_only,\n fixed_idx=fixed_idx,\n )\n self.model_config = model_config\n self.optimizer_config = optimizer_config\n self.training_config = training_config\n self.loss_type = loss_type\n self.n_fragments = len(fragment_names)\n self.remove_h = training_config[\"remove_h\"]\n self.pos_only = pos_only\n self.process_type = process_type or \"QM9\"\n self.scales = scales\n\n sampling_gamma_module = PredefinedNoiseSchedule(\n noise_schedule=\"polynomial_2\",\n timesteps=150,\n precision=precision,\n )\n self.sampling_schedule = DiffSchedule(\n gamma_module=sampling_gamma_module,\n norm_values=norm_values,\n )\n self.eval_epochs = eval_epochs\n\n self.clip_grad = training_config[\"clip_grad\"]\n if self.clip_grad:\n self.gradnorm_queue = utils.Queue()\n self.gradnorm_queue.add(3000)\n self.save_hyperparameters()\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.ddpm.parameters(), **self.optimizer_config)\n if not self.training_config[\"lr_schedule_type\"] is None:\n scheduler_func = LR_SCHEDULER[self.training_config[\"lr_schedule_type\"]]\n scheduler = scheduler_func(\n optimizer=optimizer, **self.training_config[\"lr_schedule_config\"]\n )\n return [optimizer], [scheduler]\n else:\n return optimizer\n\n def setup(self, stage: Optional[str] = None):\n func = PROCESS_FUNC[self.process_type]\n ft = FILE_TYPE[self.process_type]\n if stage == \"fit\":\n self.train_dataset = func(\n Path(self.training_config[\"datadir\"], f\"train_addprop{ft}\"),\n **self.training_config,\n )\n self.training_config[\"reflection\"] = False # Turn off reflection in val.\n self.val_dataset = func(\n Path(self.training_config[\"datadir\"], f\"valid_addprop{ft}\"),\n **self.training_config,\n )\n elif stage == \"test\":\n self.test_dataset = func(\n Path(self.training_config[\"datadir\"], f\"test{ft}\"),\n **self.training_config,\n )\n else:\n raise NotImplementedError\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n self.training_config[\"bz\"],\n shuffle=True,\n 
num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.train_dataset.collate_fn,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.val_dataset.collate_fn,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.test_dataset.collate_fn,\n )\n\n def compute_loss(self, batch):\n representations, conditions = batch\n loss_terms = self.ddpm.forward(\n representations,\n conditions,\n )\n info = {}\n if not self.pos_only:\n denoms = [\n (self.ddpm.pos_dim + self.ddpm.node_nfs[ii])\n * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n else:\n denoms = [\n self.ddpm.pos_dim * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n error_t_normalized = [\n loss_terms[\"error_t\"][ii] / denoms[ii] * self.scales[ii]\n for ii in range(self.n_fragments)\n ]\n if self.loss_type == \"l2\" and self.training:\n # normalize loss_t\n loss_t = torch.stack(error_t_normalized, dim=0).sum(dim=0)\n\n # normalize loss_0\n loss_0_x = [\n loss_terms[\"loss_0_x\"][ii]\n * self.scales[ii]\n / (self.ddpm.pos_dim * representations[ii][\"size\"])\n for ii in range(self.n_fragments)\n ]\n loss_0_x = torch.stack(loss_0_x, dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = loss_0_x + loss_0_cat + loss_0_charge\n\n # VLB objective or evaluation step\n else:\n # Note: SNR_weight should be negative\n error_t = [\n -self.ddpm.T * 0.5 * loss_terms[\"SNR_weight\"] * _error_t\n for _error_t in loss_terms[\"error_t\"]\n ]\n loss_t = torch.stack(error_t, dim=0).sum(dim=0)\n\n loss_0_x = torch.stack(loss_terms[\"loss_0_x\"], dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = (\n loss_0_x + loss_0_cat + loss_0_charge + loss_terms[\"neg_log_constants\"]\n )\n\n nll = loss_t + loss_0 + loss_terms[\"kl_prior\"]\n # nll = loss_t\n\n for ii in range(self.n_fragments):\n info[f\"error_t_{ii}\"] = error_t_normalized[ii].mean().item() / (\n self.scales[ii] + 1e-4\n )\n info[f\"unorm_error_t_{ii}\"] = loss_terms[\"error_t\"][ii].mean().item()\n\n # Correct for normalization on x.\n if not (self.loss_type == \"l2\" and self.training):\n nll = nll - loss_terms[\"delta_log_px\"]\n\n # Transform conditional nll into joint nll\n # Note:\n # loss = -log p(x,h|N) and log p(x,h,N) = log p(x,h|N) + log p(N)\n # Therefore, log p(x,h|N) = -loss + log p(N)\n # => loss_new = -log p(x,h,N) = loss - log p(N)\n nll = nll - loss_terms[\"log_pN\"]\n\n return nll, info\n\n def eval_inplaint_batch(\n self,\n batch: List,\n resamplings: int = 5,\n jump_length: int = 5,\n frag_fixed: List = [0, 2],\n ):\n sampling_ddpm = copy.deepcopy(self.ddpm)\n sampling_ddpm.schedule = self.sampling_schedule\n sampling_ddpm.T = self.sampling_schedule.gamma_module.timesteps\n sampling_ddpm.eval()\n\n representations, conditions = batch\n xh_fixed = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n n_samples = representations[0][\"size\"].size(0)\n fragments_nodes = [repre[\"size\"] for repre 
in representations]\n with torch.no_grad():\n out_samples, _ = sampling_ddpm.inpaint(\n n_samples=n_samples,\n fragments_nodes=fragments_nodes,\n conditions=conditions,\n return_frames=1,\n resamplings=resamplings,\n jump_length=jump_length,\n timesteps=None,\n xh_fixed=xh_fixed,\n frag_fixed=frag_fixed,\n )\n rmsds = batch_rmsd(\n fragments_nodes,\n out_samples[0],\n xh_fixed,\n idx=1,\n threshold=0.5,\n )\n return np.mean(rmsds), np.median(rmsds)\n\n def training_step(self, batch, batch_idx):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n\n self.log(\"train-totloss\", loss, rank_zero_only=True)\n for k, v in info.items():\n self.log(f\"train-{k}\", v, rank_zero_only=True)\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for training batch...\",\n batch[1].shape,\n batch_idx,\n )\n rmsd_mean, rmsd_median = self.eval_inplaint_batch(batch)\n info[\"rmsd\"], info[\"rmsd-median\"] = rmsd_mean, rmsd_median\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n info[\"loss\"] = loss\n return info\n\n def _shared_eval(self, batch, batch_idx, prefix, *args):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n info[\"totloss\"] = loss.item()\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for validation batch...\",\n batch[1].shape,\n batch_idx,\n )\n info[\"rmsd\"], info[\"rmsd-median\"] = self.eval_inplaint_batch(batch)\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n\n info_prefix = {}\n for k, v in info.items():\n info_prefix[f\"{prefix}-{k}\"] = v\n return info_prefix\n\n def validation_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"val\", *args)\n\n def test_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"test\", *args)\n\n def validation_epoch_end(self, val_step_outputs):\n val_epoch_metrics = average_over_batch_metrics(val_step_outputs)\n if self.trainer.is_global_zero:\n pretty_print(self.current_epoch, val_epoch_metrics, prefix=\"val\")\n val_epoch_metrics.update({\"epoch\": self.current_epoch})\n for k, v in val_epoch_metrics.items():\n self.log(k, v, sync_dist=True)\n\n def training_epoch_end(self, outputs) -> None:\n epoch_metrics = average_over_batch_metrics(\n outputs, allowed=[\"rmsd\", \"rmsd-median\"]\n )\n self.log(\"train-rmsd\", epoch_metrics[\"rmsd\"], sync_dist=True)\n self.log(\"train-rmsd-median\", epoch_metrics[\"rmsd-median\"], sync_dist=True)\n\n def configure_gradient_clipping(\n self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm\n ):\n if not self.clip_grad:\n return\n\n # Allow gradient norm to be 150% + 1.5 * stdev of the recent history.\n max_grad_norm = 1.5 * self.gradnorm_queue.mean() + 3 * self.gradnorm_queue.std()\n\n # Get current grad_norm\n params = [p for g in optimizer.param_groups for p in g[\"params\"]]\n grad_norm = utils.get_grad_norm(params)\n\n # Lightning will handle the gradient clipping\n self.clip_gradients(\n optimizer, gradient_clip_val=max_grad_norm, gradient_clip_algorithm=\"norm\"\n )\n\n if float(grad_norm) > max_grad_norm:\n self.gradnorm_queue.add(float(max_grad_norm))\n else:\n self.gradnorm_queue.add(float(grad_norm))\n\n if float(grad_norm) > max_grad_norm:\n print(\n f\"Clipped gradient with value {grad_norm:.1f} \"\n f\"while allowed {max_grad_norm:.1f}\"\n )" }, { "identifier": "ProcessedTS1x", 
"path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { 
"identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" }, { "identifier": "assemble_sample_inputs", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def assemble_sample_inputs(\n atoms: List,\n device: torch.device = torch.device(\"cuda\"),\n n_samples: int = 1,\n frag_type: bool = False,\n):\n empty_site = torch.tensor([[1, 0, 0, 0, 0, 1]], device=device)\n if not frag_type:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1],\n \"C\": [0, 1, 0, 0, 0, 6],\n \"N\": [0, 0, 1, 0, 0, 7],\n \"O\": [0, 0, 0, 1, 0, 8],\n \"F\": [0, 0, 0, 0, 1, 9],\n }\n ] * 2\n else:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1, 0],\n \"C\": [0, 1, 0, 0, 0, 6, 0],\n \"N\": [0, 0, 1, 0, 0, 7, 0],\n \"O\": [0, 0, 0, 1, 0, 8, 0],\n \"F\": [0, 0, 0, 0, 1, 9, 0],\n },\n {\n \"H\": [1, 0, 0, 0, 0, 1, 1],\n \"C\": [0, 1, 0, 0, 0, 6, 1],\n \"N\": [0, 0, 1, 0, 0, 7, 1],\n \"O\": [0, 0, 0, 1, 0, 8, 1],\n \"F\": [0, 0, 0, 0, 1, 9, 1],\n },\n ]\n\n h0 = [\n torch.cat(\n [\n torch.tensor([decoders[ii % 2][atom] for atom in atoms], device=device)\n for _ in range(n_samples)\n ]\n )\n for ii in range(3)\n ]\n return h0" }, { "identifier": "write_tmp_xyz", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def write_tmp_xyz(\n fragments_nodes, out_samples, idx=[0], prefix=\"gen\", 
localpath=\"tmp\", ex_ind=0\n):\n TYPEMAP = {\n 0: \"react\",\n 1: \"ts\",\n 2: \"prod\",\n }\n for ii in idx:\n st = TYPEMAP[ii]\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(fragments_nodes[0]):\n _jj = jj + ex_ind\n xyzfile = f\"{localpath}/{prefix}_{_jj}_{st}.xyz\"\n end_ind += natoms.item()\n write_single_xyz(\n xyzfile,\n natoms.item(),\n out=out_samples[ii][start_ind:end_ind],\n )\n start_ind = end_ind" }, { "identifier": "xyz2pmg", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def xyz2pmg(xyzfile):\n xyz_converter = XYZ(mol=None)\n mol = xyz_converter.from_file(xyzfile).molecule\n return mol" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" } ]
import torch import py3Dmol import numpy as np import plotly.express as px import json from typing import Optional from torch import tensor from e3nn import o3 from torch_scatter import scatter_mean from oa_reactdiff.model import LEFTNet from oa_reactdiff.tests.model.utils import ( generate_full_eij, get_cut_graph_mask, ) from torch.utils.data import DataLoader from oa_reactdiff.trainer.pl_trainer import DDPMModule from oa_reactdiff.dataset import ProcessedTS1x from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import FEATURE_MAPPING from oa_reactdiff.analyze.rmsd import batch_rmsd from oa_reactdiff.utils.sampling_tools import ( assemble_sample_inputs, write_tmp_xyz, ) from glob import glob from oa_reactdiff.analyze.rmsd import xyz2pmg, pymatgen_rmsd from pymatgen.core import Molecule from collections import OrderedDict from sklearn.cluster import KMeans from glob import glob from pymatgen.io.xyz import XYZ from openbabel import pybel from oa_reactdiff.analyze.rmsd import pymatgen_rmsd
11,579
# --- Imports and a few helper functions ----
default_float = torch.float64
torch.set_default_dtype(default_float)  # use double precision for more accurate tests


def remove_mean_batch(
    x: tensor,
    indices: Optional[tensor] = None
) -> tensor:
    """Remove the mean of each batch in x.

    Args:
        x (tensor): input tensor.
        indices (Optional[tensor], optional): batch indices. Defaults to None.

    Returns:
        tensor: output tensor with batch mean as 0.
    """
    if indices is None:
        return x - torch.mean(x, dim=0)
    mean = scatter_mean(x, indices, dim=0)
    x = x - mean[indices]
    return x


def draw_in_3dmol(mol: str, fmt: str = "xyz") -> py3Dmol.view:
    """Draw a molecule.

    Args:
        mol (str): str content of molecule.
        fmt (str, optional): format. Defaults to "xyz".

    Returns:
        py3Dmol.view: output viewer
    """
    viewer = py3Dmol.view(1024, 576)
    viewer.addModel(mol, fmt)
    viewer.setStyle({'stick': {}, "sphere": {"radius": 0.36}})
    viewer.zoomTo()
    return viewer


def assemble_xyz(z: list, pos: tensor) -> str:
    """Assemble atomic numbers and positions into xyz format.

    Args:
        z (list): chemical elements
        pos (tensor): 3D coordinates

    Returns:
        str: xyz string
    """
    natoms = len(z)
    xyz = f"{natoms}\n\n"
    for _z, _pos in zip(z, pos.numpy()):
        xyz += f"{_z}\t" + "\t".join([str(x) for x in _pos]) + "\n"
    return xyz


num_layers = 2
hidden_channels = 8
in_hidden_channels = 4
num_radial = 4
# --- Imports and a few helper functions ----
default_float = torch.float64
torch.set_default_dtype(default_float)  # use double precision for more accurate tests


def remove_mean_batch(
    x: tensor,
    indices: Optional[tensor] = None
) -> tensor:
    """Remove the mean of each batch in x.

    Args:
        x (tensor): input tensor.
        indices (Optional[tensor], optional): batch indices. Defaults to None.

    Returns:
        tensor: output tensor with batch mean as 0.
    """
    if indices is None:
        return x - torch.mean(x, dim=0)
    mean = scatter_mean(x, indices, dim=0)
    x = x - mean[indices]
    return x


def draw_in_3dmol(mol: str, fmt: str = "xyz") -> py3Dmol.view:
    """Draw a molecule.

    Args:
        mol (str): str content of molecule.
        fmt (str, optional): format. Defaults to "xyz".

    Returns:
        py3Dmol.view: output viewer
    """
    viewer = py3Dmol.view(1024, 576)
    viewer.addModel(mol, fmt)
    viewer.setStyle({'stick': {}, "sphere": {"radius": 0.36}})
    viewer.zoomTo()
    return viewer


def assemble_xyz(z: list, pos: tensor) -> str:
    """Assemble atomic numbers and positions into xyz format.

    Args:
        z (list): chemical elements
        pos (tensor): 3D coordinates

    Returns:
        str: xyz string
    """
    natoms = len(z)
    xyz = f"{natoms}\n\n"
    for _z, _pos in zip(z, pos.numpy()):
        xyz += f"{_z}\t" + "\t".join([str(x) for x in _pos]) + "\n"
    return xyz


num_layers = 2
hidden_channels = 8
in_hidden_channels = 4
num_radial = 4
model = LEFTNet(
0
2023-10-30 02:53:38+00:00
16k
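The cropped_code in this record defines two small helpers: per-batch mean removal (via torch_scatter.scatter_mean) and .xyz string assembly. A self-contained sketch of the same two operations follows; it uses only torch, and the index_add_-based mean is an assumption standing in for scatter_mean, with _demo names to mark it as illustrative.

# Sketch only: stand-in implementations, not the record's code.
import torch

def remove_mean_batch_demo(x: torch.Tensor, indices: torch.Tensor = None) -> torch.Tensor:
    # With no batch indices, subtract the global column-wise mean.
    if indices is None:
        return x - x.mean(dim=0)
    # Per-batch mean via index_add_, standing in for torch_scatter.scatter_mean.
    n_batches = int(indices.max()) + 1
    sums = torch.zeros(n_batches, x.shape[1], dtype=x.dtype).index_add_(0, indices, x)
    counts = torch.zeros(n_batches, dtype=x.dtype).index_add_(
        0, indices, torch.ones(len(indices), dtype=x.dtype)
    )
    return x - (sums / counts.unsqueeze(-1))[indices]

def assemble_xyz_demo(z: list, pos: torch.Tensor) -> str:
    # xyz format: atom count, blank comment line, then "element x y z" per atom.
    lines = [str(len(z)), ""]
    for elem, coords in zip(z, pos.tolist()):
        lines.append(elem + "\t" + "\t".join(f"{c:.6f}" for c in coords))
    return "\n".join(lines) + "\n"

pos = torch.randn(4, 3, dtype=torch.float64)
indices = torch.tensor([0, 0, 1, 1])
centered = remove_mean_batch_demo(pos, indices)   # each batch now has zero mean
print(assemble_xyz_demo(["C", "H", "O", "H"], centered))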
Weitheskmt/WeiDMD
build/lib/weidmd/cdmd.py
[ { "identifier": "DMDBase", "path": "build/lib/weidmd/dmdbase.py", "snippet": "class DMDBase:\n \"\"\"\n Dynamic Mode Decomposition base class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: If True, amplitudes are computed like in optimized DMD (see\n :func:`~dmdbase.DMDBase._compute_amplitudes` for reference). If\n False, amplitudes are computed following the standard algorithm. If\n `opt` is an integer, it is used as the (temporal) index of the snapshot\n used to compute DMD modes amplitudes (following the standard\n algorithm).\n The reconstruction will generally be better in time instants near the\n chosen snapshot; however increasing `opt` may lead to wrong results\n when the system presents small eigenvalues. For this reason a manual\n selection of the number of eigenvalues considered for the analyisis may\n be needed (check `svd_rank`). Also setting `svd_rank` to a value\n between 0 and 1 may give better results. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n\n :cvar dict original_time: dictionary that contains information about the\n time window where the system is sampled:\n\n - `t0` is the time of the first input snapshot;\n - `tend` is the time of the last input snapshot;\n - `dt` is the delta time between the snapshots.\n\n :cvar dict dmd_time: dictionary that contains information about the time\n window where the system is reconstructed:\n\n - `t0` is the time of the first approximated solution;\n - `tend` is the time of the last approximated solution;\n - `dt` is the delta time between the approximated solutions.\n\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n sorted_eigs=False,\n tikhonov_regularization=None,\n ):\n self._Atilde = DMDOperator(\n svd_rank=svd_rank,\n exact=exact,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n tikhonov_regularization=tikhonov_regularization,\n )\n\n self._tlsq_rank = tlsq_rank\n self._original_time = None\n self._dmd_time = None\n self._opt = opt\n self._exact = exact\n\n self._b = None # amplitudes\n self._snapshots_holder = None\n\n self._modes_activation_bitmask_proxy = None\n\n @property\n def dmd_timesteps(self):\n \"\"\"\n Get the timesteps of the reconstructed states.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.dmd_time[\"t0\"],\n self.dmd_time[\"tend\"] + self.dmd_time[\"dt\"],\n self.dmd_time[\"dt\"],\n )\n\n @property\n def original_timesteps(self):\n \"\"\"\n Get the timesteps of the original snapshot.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.original_time[\"t0\"],\n self.original_time[\"tend\"] + self.original_time[\"dt\"],\n self.original_time[\"dt\"],\n )\n\n @property\n def modes(self):\n \"\"\"\n Get the matrix containing the DMD modes, stored by column.\n\n :return: the matrix containing the DMD modes.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.modes\n return self._modes_activation_bitmask_proxy.modes\n\n @property\n def operator(self):\n \"\"\"\n Get the instance of DMDOperator.\n\n :return: the instance of DMDOperator\n :rtype: DMDOperator\n \"\"\"\n return self._Atilde\n\n @property\n def eigs(self):\n \"\"\"\n Get the eigenvalues of A tilde.\n\n :return: the eigenvalues from the eigendecomposition of `atilde`.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.eigenvalues\n return self._modes_activation_bitmask_proxy.eigs\n\n @property\n def dynamics(self):\n \"\"\"\n Get the time evolution of each mode.\n\n .. 
math::\n\n \\\\mathbf{x}(t) \\\\approx\n \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\exp \\\\left( \\\\omega_{k} t\n \\\\right) b_{k} = \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\left(\n \\\\lambda_{k} \\\\right)^{\\\\left( t / \\\\Delta t \\\\right)} b_{k}\n\n :return: the matrix that contains all the time evolution, stored by\n row.\n :rtype: numpy.ndarray\n \"\"\"\n temp = np.repeat(\n self.eigs[:, None], self.dmd_timesteps.shape[0], axis=1\n )\n tpow = (\n self.dmd_timesteps - self.original_time[\"t0\"]\n ) // self.original_time[\"dt\"]\n\n # The new formula is x_(k+j) = \\Phi \\Lambda^k \\Phi^(-1) x_j.\n # Since j is fixed, for a given snapshot \"u\" we have the following\n # formula:\n # x_u = \\Phi \\Lambda^{u-j} \\Phi^(-1) x_j\n # Therefore tpow must be scaled appropriately.\n tpow = self._translate_eigs_exponent(tpow)\n\n return np.power(temp, tpow) * self.amplitudes[:, None]\n\n def _translate_eigs_exponent(self, tpow):\n \"\"\"\n Transforms the exponent of the eigenvalues in the dynamics formula\n according to the selected value of `self._opt` (check the documentation\n for `opt` in :func:`__init__ <dmdbase.DMDBase.__init__>`).\n\n :param tpow: the exponent(s) of Sigma in the original DMD formula.\n :type tpow: int or np.ndarray\n :return: the exponent(s) adjusted according to `self._opt`\n :rtype: int or np.ndarray\n \"\"\"\n\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n if amplitudes_snapshot_index < 0:\n # we take care of negative indexes: -n becomes T - n\n return tpow - (self.snapshots.shape[1] + amplitudes_snapshot_index)\n else:\n return tpow - amplitudes_snapshot_index\n\n @property\n def reconstructed_data(self):\n \"\"\"\n Get the reconstructed data.\n\n :return: the matrix that contains the reconstructed snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return self.modes.dot(self.dynamics)\n\n @property\n def snapshots(self):\n \"\"\"\n Get the input data (space flattened).\n\n :return: the matrix that contains the flattened snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots\n return None\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Get the original input snapshot shape.\n\n :return: input snapshots shape.\n :rtype: tuple\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots_shape\n return None\n\n @property\n def frequency(self):\n \"\"\"\n Get the amplitude spectrum.\n\n :return: the array that contains the frequencies of the eigenvalues.\n :rtype: numpy.ndarray\n \"\"\"\n return np.log(self.eigs).imag / (2 * np.pi * self.original_time[\"dt\"])\n\n @property\n def growth_rate(self): # To check\n \"\"\"\n Get the growth rate values relative to the modes.\n\n :return: the Floquet values\n :rtype: numpy.ndarray\n \"\"\"\n return self.eigs.real / self.original_time[\"dt\"]\n\n @property\n def amplitudes(self):\n \"\"\"\n Get the coefficients that minimize the error between the original\n system and the reconstructed one. 
For futher information, see\n `dmdbase._compute_amplitudes`.\n\n :return: the array that contains the amplitudes coefficient.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n return self._modes_activation_bitmask_proxy.amplitudes\n\n @property\n def fitted(self):\n \"\"\"Check whether this DMD instance has been fitted.\n\n :return: `True` is the instance has been fitted, `False` otherwise.\n :rtype: bool\n \"\"\"\n try:\n return self.operator.modes is not None\n except (ValueError, AttributeError):\n return False\n\n @property\n def modes_activation_bitmask(self):\n \"\"\"\n Get the bitmask which controls which DMD modes are enabled at the\n moment in this DMD instance.\n\n The DMD instance must be fitted before this property becomes valid.\n After :func:`fit` is called, the defalt value of\n `modes_activation_bitmask` is an array of `True` values of the same\n shape of :func:`amplitudes`.\n\n The array returned is read-only (this allow us to react appropriately\n to changes in the bitmask). In order to modify the bitmask you need to\n set the field to a brand-new value (see example below).\n\n Example:\n\n .. code-block:: python\n\n >>> # this is an error\n >>> dmd.modes_activation_bitmask[[1,2]] = False\n ValueError: assignment destination is read-only\n >>> tmp = np.array(dmd.modes_activation_bitmask)\n >>> tmp[[1,2]] = False\n >>> dmd.modes_activation_bitmask = tmp\n\n :return: The DMD modes activation bitmask.\n :rtype: numpy.ndarray\n \"\"\"\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n\n bitmask = self._modes_activation_bitmask_proxy.old_bitmask\n # make sure that the array is immutable\n bitmask.flags.writeable = False\n return bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n value = np.array(value)\n if value.dtype != bool:\n raise RuntimeError(\n \"Unxpected dtype, expected bool, got {}.\".format(value.dtype)\n )\n\n # check that the shape is correct\n if value.shape != self.modes_activation_bitmask.shape:\n raise ValueError(\n \"Expected shape {}, got {}\".format(\n self.modes_activation_bitmask.shape, value.shape\n )\n )\n\n self._modes_activation_bitmask_proxy.change_bitmask(value)\n\n def _allocate_modes_bitmask_proxy(self):\n \"\"\"\n Utility method which allocates the activation bitmask proxy using the\n quantities that are currently available in this DMD instance. Fails\n quietly if the amplitudes are not set.\n \"\"\"\n if hasattr(self, \"_b\") and self._b is not None:\n self._modes_activation_bitmask_proxy = ActivationBitmaskProxy(\n self.operator, self._b\n )\n\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. 
For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: DMDBase\n \"\"\"\n\n if isinstance(key, (slice, int, list, np.ndarray)):\n filter_function = lambda x: isinstance(x, int)\n\n if isinstance(key, (list, np.ndarray)):\n if not all(map(filter_function, key)):\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or \"\n \"a list of indexes.\"\n )\n # no repeated elements\n if len(key) != len(set(key)):\n raise ValueError(\"Repeated indexes are not supported.\")\n else:\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or a list \"\n \"of indexes, got {}\".format(type(key))\n )\n\n mask = np.full(self.modes_activation_bitmask.shape, False)\n mask[key] = True\n\n shallow_copy = copy(self)\n shallow_copy._allocate_modes_bitmask_proxy()\n shallow_copy.modes_activation_bitmask = mask\n\n return shallow_copy\n\n @property\n def original_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n fit this DMD instance.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first input snapshot (0 by default).\n `tend` Time of the last input snapshot (usually corresponds to the number of snapshots).\n `dt` Timestep between two snapshots (1 by default).\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._original_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._original_time\n\n @property\n def dmd_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n reconstruct/predict using this DMD instance. By default this is equal\n to :func:`original_time`.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first output snapshot.\n `tend` Time of the last output snapshot.\n `dt` Timestep between two snapshots.\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._dmd_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._dmd_time\n\n @dmd_time.setter\n def dmd_time(self, value):\n self._dmd_time = deepcopy(value)\n\n def _set_initial_time_dictionary(self, time_dict):\n \"\"\"\n Set the initial values for the class fields `time_dict` and\n `original_time`. 
This is usually called in `fit()` and never again.\n\n :param time_dict: Initial time dictionary for this DMD instance.\n :type time_dict: dict\n \"\"\"\n if not (\n \"t0\" in time_dict and \"tend\" in time_dict and \"dt\" in time_dict\n ):\n raise ValueError(\n 'time_dict must contain the keys \"t0\", \"tend\" and \"dt\".'\n )\n if len(time_dict) > 3:\n raise ValueError(\n 'time_dict must contain only the keys \"t0\", \"tend\" and \"dt\".'\n )\n\n self._original_time = DMDTimeDict(dict(time_dict))\n self._dmd_time = DMDTimeDict(dict(time_dict))\n\n def fit(self, X):\n \"\"\"\n Abstract method to fit the snapshots matrices.\n\n Not implemented, it has to be implemented in subclasses.\n \"\"\"\n name = self.__class__.__name__\n msg = f\"Subclass must implement abstract method {name}.fit\"\n raise NotImplementedError(msg)\n\n def _reset(self):\n \"\"\"\n Reset this instance. Should be called in :func:`fit`.\n \"\"\"\n self._modes_activation_bitmask_proxy = None\n self._b = None\n self._snapshots_holder = None\n\n def save(self, fname):\n \"\"\"\n Save the object to `fname` using the pickle module.\n\n :param str fname: the name of file where the reduced order model will\n be saved.\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD(...) # Construct here the rom\n >>> dmd.fit(...)\n >>> dmd.save('pydmd.dmd')\n \"\"\"\n with open(fname, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(fname):\n \"\"\"\n Load the object from `fname` using the pickle module.\n\n :return: The `ReducedOrderModel` loaded\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD.load('pydmd.dmd')\n >>> print(dmd.reconstructed_data)\n \"\"\"\n with open(fname, \"rb\") as output:\n return pickle.load(output)\n\n def _optimal_dmd_matrices(self):\n # compute the vandermonde matrix\n vander = np.vander(self.eigs, len(self.dmd_timesteps), True)\n\n P = np.multiply(\n np.dot(self.modes.conj().T, self.modes),\n np.conj(np.dot(vander, vander.conj().T)),\n )\n\n if self._exact:\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [vander, self.snapshots.conj().T, self.modes]\n )\n )\n )\n else:\n _, s, V = compute_svd(self.snapshots[:, :-1], self.modes.shape[-1])\n\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [\n vander[:, :-1],\n V,\n np.diag(s).conj(),\n self.operator.eigenvectors,\n ]\n )\n )\n )\n\n return P, q\n\n def _compute_amplitudes(self):\n \"\"\"\n Compute the amplitude coefficients. If `self._opt` is False the\n amplitudes are computed by minimizing the error between the modes and\n the first snapshot; if `self._opt` is True the amplitudes are computed\n by minimizing the error between the modes and all the snapshots, at the\n expense of bigger computational cost.\n\n This method uses the class variables self.snapshots (for the\n snapshots), self.modes and self.eigs.\n\n :return: the amplitudes array\n :rtype: numpy.ndarray\n\n References for optimal amplitudes:\n Jovanovic et al. 
2014, Sparsity-promoting dynamic mode decomposition,\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\n \"\"\"\n if isinstance(self._opt, bool) and self._opt:\n # b optimal\n a = np.linalg.solve(*self._optimal_dmd_matrices())\n else:\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n a = np.linalg.lstsq(\n self.modes,\n self.snapshots.T[amplitudes_snapshot_index],\n rcond=None,\n )[0]\n\n return a" }, { "identifier": "DMDOperator", "path": "build/lib/weidmd/dmdoperator.py", "snippet": "class DMDOperator:\n \"\"\"\n Dynamic Mode Decomposition standard operator class. Non-standard ways of\n computing the low-rank Atilde operator should be coded into subclasses of\n this class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n \"\"\"\n\n def __init__(\n self,\n svd_rank,\n exact,\n forward_backward,\n rescale_mode,\n sorted_eigs,\n tikhonov_regularization,\n ):\n self._exact = exact\n self._rescale_mode = rescale_mode\n self._svd_rank = svd_rank\n self._forward_backward = forward_backward\n self._sorted_eigs = sorted_eigs\n self._tikhonov_regularization = tikhonov_regularization\n self._norm_X = None\n\n def compute_operator(self, X, Y):\n \"\"\"\n Compute the low-rank operator.\n\n :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :return: the (truncated) left-singular vectors matrix, the (truncated)\n singular values array, the (truncated) right-singular vectors\n matrix of X.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n \"\"\"\n\n U, s, V = compute_svd(X, self._svd_rank)\n\n if self._tikhonov_regularization is not None:\n self._norm_X = np.linalg.norm(X)\n atilde = self._least_square_operator(U, s, V, Y)\n\n if self._forward_backward:\n # b stands for \"backward\"\n bU, bs, bV = compute_svd(Y, svd_rank=len(s))\n atilde_back = self._least_square_operator(bU, bs, bV, X)\n atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back)))\n if hasattr(np, \"complex256\") and atilde.dtype == np.complex256:\n atilde = atilde.astype(np.complex128)\n msg = \"Casting atilde from np.complex256 to np.complex128\"\n logging.info(msg)\n\n if self._rescale_mode == \"auto\":\n self._rescale_mode = s\n\n self._Atilde = atilde\n self._compute_eigenquantities()\n self._compute_modes(Y, U, s, V)\n\n return U, s, V\n\n @property\n def shape(self):\n \"\"\"Shape of the operator\"\"\"\n return self.as_numpy_array.shape\n\n def __call__(self, snapshot_lowrank_modal_coefficients):\n \"\"\"\n Apply the low-rank operator to a vector of the modal coefficients of a\n snapshot(s).\n\n :param numpy.ndarray snapshot_lowrank_modal_coefficients: low-rank\n representation (in modal coefficients) of a snapshot x{n}.\n :return: low-rank representation (in modal coefficients) of x{n+1}.\n :rtype: numpy.ndarray\n \"\"\"\n\n return self._Atilde.dot(snapshot_lowrank_modal_coefficients)\n\n @property\n def eigenvalues(self):\n if not hasattr(self, \"_eigenvalues\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n if not hasattr(self, \"_eigenvectors\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvectors\n\n @property\n def modes(self):\n if not hasattr(self, \"_modes\"):\n raise ValueError(\"You need to call fit before\")\n return self._modes\n\n @property\n def Lambda(self):\n if not hasattr(self, \"_Lambda\"):\n raise ValueError(\"You need to call fit before\")\n return self._Lambda\n\n @property\n def as_numpy_array(self):\n if not hasattr(self, \"_Atilde\") or self._Atilde is None:\n raise ValueError(\"You need to call fit before\")\n else:\n return self._Atilde\n\n def _least_square_operator(self, U, s, V, Y):\n \"\"\"\n Private method that computes the lowrank operator from the singular\n value decomposition of matrix X and the matrix Y.\n\n .. 
math::\n\n \\\\mathbf{\\\\tilde{A}} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{X}^\\\\dagger \\\\mathbf{U} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{V} \\\\mathbf{S}^{-1}\n\n :param numpy.ndarray U: 2D matrix that contains the left-singular\n vectors of X, stored by column.\n :param numpy.ndarray s: 1D array that contains the singular values of\n X.\n :param numpy.ndarray V: 2D matrix that contains the right-singular\n vectors of X, stored by row.\n :param numpy.ndarray Y: input matrix Y.\n :return: the lowrank operator\n :rtype: numpy.ndarray\n \"\"\"\n if self._tikhonov_regularization is not None:\n s = (\n s**2 + self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(s)\n return np.linalg.multi_dot([U.T.conj(), Y, V]) * np.reciprocal(s)\n\n def _compute_eigenquantities(self):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n low-dimensional operator, scaled according to self._rescale_mode.\n \"\"\"\n\n if self._rescale_mode is None:\n # scaling isn't required\n Ahat = self._Atilde\n elif isinstance(self._rescale_mode, np.ndarray):\n if len(self._rescale_mode) != self.as_numpy_array.shape[0]:\n raise ValueError(\n \"\"\"Scaling by an invalid number of\n coefficients\"\"\"\n )\n scaling_factors_array = self._rescale_mode\n\n factors_inv_sqrt = np.diag(np.power(scaling_factors_array, -0.5))\n factors_sqrt = np.diag(np.power(scaling_factors_array, 0.5))\n\n # if an index is 0, we get inf when taking the reciprocal\n for idx, item in enumerate(scaling_factors_array):\n if item == 0:\n factors_inv_sqrt[idx] = 0\n\n Ahat = np.linalg.multi_dot(\n [factors_inv_sqrt, self.as_numpy_array, factors_sqrt]\n )\n else:\n raise ValueError(\n \"Invalid value for rescale_mode: {} of type {}\".format(\n self._rescale_mode, type(self._rescale_mode)\n )\n )\n\n self._eigenvalues, self._eigenvectors = np.linalg.eig(Ahat)\n\n if self._sorted_eigs is not False and self._sorted_eigs is not None:\n if self._sorted_eigs == \"abs\":\n\n def k(tp):\n return abs(tp[0])\n\n elif self._sorted_eigs == \"real\":\n\n def k(tp):\n eig = tp[0]\n if isinstance(eig, complex):\n return (eig.real, eig.imag)\n return (eig.real, 0)\n\n else:\n raise ValueError(\n \"Invalid value for sorted_eigs: {}\".format(\n self._sorted_eigs\n )\n )\n\n # each column is an eigenvector, therefore we take the\n # transpose to associate each row (former column) to an\n # eigenvalue before sorting\n a, b = zip(\n *sorted(zip(self._eigenvalues, self._eigenvectors.T), key=k)\n )\n self._eigenvalues = np.array([eig for eig in a])\n # we restore the original condition (eigenvectors in columns)\n self._eigenvectors = np.array([vec for vec in b]).T\n\n def _compute_modes(self, Y, U, Sigma, V):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n high-dimensional operator (stored in self.modes and self.Lambda).\n\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :param numpy.ndarray U: (truncated) left singular vectors of X\n :param numpy.ndarray Sigma: (truncated) singular values of X\n :param numpy.ndarray V: (truncated) right singular vectors of X\n \"\"\"\n\n if self._rescale_mode is None:\n W = self.eigenvectors\n else:\n # compute W as shown in arXiv:1409.5496 (section 2.4)\n factors_sqrt = np.diag(np.power(self._rescale_mode, 0.5))\n W = factors_sqrt.dot(self.eigenvectors)\n\n # compute the eigenvectors of the high-dimensional operator\n if self._exact:\n if self._tikhonov_regularization is not None:\n Sigma = (\n Sigma**2 + 
self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(Sigma)\n high_dimensional_eigenvectors = (\n Y.dot(V) * np.reciprocal(Sigma)\n ).dot(W)\n else:\n high_dimensional_eigenvectors = U.dot(W)\n\n # eigenvalues are the same of lowrank\n high_dimensional_eigenvalues = self.eigenvalues\n\n self._modes = high_dimensional_eigenvectors\n self._Lambda = high_dimensional_eigenvalues" }, { "identifier": "Snapshots", "path": "build/lib/weidmd/snapshots.py", "snippet": "class Snapshots:\n \"\"\"\n Utility class to preprocess snapshots shape for DMD.\n\n This class expects the time to be the last dimensions of the array.\n If a Python list is passed to the constructor, each element in the\n list is assumed to be a snapshot in time.\n\n Space dimensions are flattened (C-order) such that the\n matrix becomes 2D (time changes along the last axis).\n\n :param numpy.array | list(numpy.array) X: Training snapshots.\n \"\"\"\n\n def __init__(self, X):\n (\n self._snapshots,\n self._snapshots_shape,\n ) = Snapshots._unroll_space_dimensions(X)\n\n if self._snapshots.shape[-1] == 1:\n raise ValueError(\"Received only one time snapshot.\")\n\n Snapshots._check_condition_number(self._snapshots)\n\n logging.info(\n \"Snapshots: %s, snapshot shape: %s\",\n self._snapshots.shape,\n self._snapshots_shape,\n )\n\n @staticmethod\n def _unroll_space_dimensions(X):\n if hasattr(X, \"ndim\"):\n if X.ndim == 1:\n raise ValueError(\n \"Expected at least a 2D matrix (space x time).\"\n )\n snapshots = X.reshape((-1, X.shape[-1]))\n shapes = set((X.shape[:-1],))\n else:\n shapes, arrays = zip(\n *[(xarr.shape, xarr.flatten()) for xarr in map(np.asarray, X)]\n )\n\n shapes = set(shapes)\n if len(shapes) != 1:\n raise ValueError(\n f\"Snapshots must have the same size, found {len(shapes)}.\"\n )\n if len(next(iter(shapes))) == 0:\n raise ValueError(\"Expected at least a 2D matrix\")\n\n # move the time to the last axis\n snapshots = np.moveaxis(np.stack(arrays), 0, -1)\n\n return snapshots, shapes.pop()\n\n @staticmethod\n def _check_condition_number(X):\n cond_number = np.linalg.cond(X)\n if cond_number > 10e4:\n warnings.warn(\n f\"Input data condition number {cond_number}. \"\n \"\"\"Consider preprocessing data, passing in augmented data\nmatrix, or regularization methods.\"\"\"\n )\n\n @property\n def snapshots(self):\n \"\"\"\n Snapshots of the system (space flattened).\n \"\"\"\n return self._snapshots\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Original (i.e. non-flattened) snapshot shape (time is ignored).\n \"\"\"\n return self._snapshots_shape" }, { "identifier": "compute_svd", "path": "build/lib/weidmd/utils.py", "snippet": "def compute_svd(X, svd_rank=0):\n \"\"\"\n Truncated Singular Value Decomposition.\n\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the truncated left-singular vectors matrix, the truncated\n singular values array, the truncated right-singular vectors matrix.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n\n References:\n Gavish, Matan, and David L. 
Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, V = np.linalg.svd(X, full_matrices=False)\n V = V.conj().T\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n if rank == 0:\n warnings.warn(\n \"SVD optimal rank is 0. The largest singular values are \"\n \"indistinguishable from noise. Setting rank truncation to 1.\",\n RuntimeWarning,\n )\n rank = 1\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = X.shape[1]\n\n U = U[:, :rank]\n V = V[:, :rank]\n s = s[:rank]\n\n return U, s, V" }, { "identifier": "compute_tlsq", "path": "build/lib/weidmd/utils.py", "snippet": "def compute_tlsq(X, Y, tlsq_rank):\n \"\"\"\n Compute Total Least Square.\n\n :param numpy.ndarray X: the first matrix;\n :param numpy.ndarray Y: the second matrix;\n :param int tlsq_rank: the rank for the truncation; If 0, the method\n does not compute any noise reduction; if positive number, the\n method uses the argument for the SVD truncation used in the TLSQ\n method.\n :return: the denoised matrix X, the denoised matrix Y\n :rtype: numpy.ndarray, numpy.ndarray\n\n References:\n https://arxiv.org/pdf/1703.11004.pdf\n https://arxiv.org/pdf/1502.03854.pdf\n \"\"\"\n # Do not perform tlsq\n if tlsq_rank == 0:\n return X, Y\n\n V = np.linalg.svd(np.append(X, Y, axis=0), full_matrices=False)[-1]\n rank = min(tlsq_rank, V.shape[0])\n VV = V[:rank, :].conj().T.dot(V[:rank, :])\n\n return X.dot(VV), Y.dot(VV)" } ]
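The compute_svd snippet in this context implements three truncation modes: svd_rank=0 applies the Gavish-Donoho hard threshold, a float in (0, 1) keeps enough singular values to reach that energy fraction, and a positive integer fixes the rank. A small numpy sketch of the first two rules follows; the synthetic rank-10 matrix is an assumption chosen purely for illustration.

# Sketch only: rank-selection rules applied to synthetic data.
import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((100, 10)) @ rng.standard_normal((10, 40))   # rank-10 data
X = X + 1e-6 * rng.standard_normal(X.shape)                          # tiny noise floor

s = np.linalg.svd(X, compute_uv=False)

# svd_rank = 0: Gavish-Donoho hard threshold tau = omega(beta) * median(s),
# with beta = (smaller dim) / (larger dim).
omega = lambda b: 0.56 * b**3 - 0.95 * b**2 + 1.82 * b + 1.43
beta = min(X.shape) / max(X.shape)
rank_optimal = int(np.sum(s > omega(beta) * np.median(s)))

# 0 < svd_rank < 1: smallest rank reaching that fraction of squared singular-value energy.
energy = np.cumsum(s**2) / np.sum(s**2)
rank_energy = int(np.searchsorted(energy, 0.99) + 1)

print(rank_optimal, rank_energy)   # both end up at or near the true rank of 10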
import numpy as np import scipy.sparse from scipy.linalg import sqrtm from .dmdbase import DMDBase from .dmdoperator import DMDOperator from .snapshots import Snapshots from .utils import compute_svd, compute_tlsq
10,856
from __future__ import division class CDMDOperator(DMDOperator): """ DMD operator for Compressed-DMD. :param svd_rank: the rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive interger, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param rescale_mode: Scale Atilde as shown in 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its eigendecomposition. None means no rescaling, 'auto' means automatic rescaling using singular values, otherwise the scaling factors. :type rescale_mode: {'auto'} or None or numpy.ndarray :param bool forward_backward: If True, the low-rank operator is computed like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is False. :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary part to break ties) if `sorted_eigs='real'`. Default: False. :type sorted_eigs: {'real', 'abs'} or False :param tikhonov_regularization: Tikhonov parameter for the regularization. If `None`, no regularization is applied, if `float`, it is used as the :math:`\lambda` tikhonov parameter. :type tikhonov_regularization: int or float """ def __init__( self, svd_rank, rescale_mode, forward_backward, sorted_eigs, tikhonov_regularization, ): super().__init__( svd_rank=svd_rank, exact=True, rescale_mode=rescale_mode, forward_backward=forward_backward, sorted_eigs=sorted_eigs, tikhonov_regularization=tikhonov_regularization, ) self._Atilde = None def compute_operator(self, compressedX, compressedY, nonCompressedY): """ Compute the low-rank operator. :param numpy.ndarray compressedX: the compressed version of the matrix containing the snapshots x0,..x{n-1} by column. :param numpy.ndarray compressedY: the compressed version of the matrix containing the snapshots x1,..x{n} by column. :param numpy.ndarray nonCompressedY: the matrix containing the snapshots x1,..x{n} by column. :return: the (truncated) left-singular vectors matrix, the (truncated) singular values array, the (truncated) right-singular vectors matrix of compressedX. :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray """ U, s, V = compute_svd(compressedX, svd_rank=self._svd_rank) atilde = self._least_square_operator(U, s, V, compressedY) if self._forward_backward: # b stands for "backward" bU, bs, bV = compute_svd(compressedY, svd_rank=self._svd_rank) atilde_back = self._least_square_operator(bU, bs, bV, compressedX) atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back))) self._Atilde = atilde self._compute_eigenquantities() self._compute_modes(nonCompressedY, U, s, V) return U, s, V
from __future__ import division class CDMDOperator(DMDOperator): """ DMD operator for Compressed-DMD. :param svd_rank: the rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive interger, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param rescale_mode: Scale Atilde as shown in 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its eigendecomposition. None means no rescaling, 'auto' means automatic rescaling using singular values, otherwise the scaling factors. :type rescale_mode: {'auto'} or None or numpy.ndarray :param bool forward_backward: If True, the low-rank operator is computed like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is False. :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary part to break ties) if `sorted_eigs='real'`. Default: False. :type sorted_eigs: {'real', 'abs'} or False :param tikhonov_regularization: Tikhonov parameter for the regularization. If `None`, no regularization is applied, if `float`, it is used as the :math:`\lambda` tikhonov parameter. :type tikhonov_regularization: int or float """ def __init__( self, svd_rank, rescale_mode, forward_backward, sorted_eigs, tikhonov_regularization, ): super().__init__( svd_rank=svd_rank, exact=True, rescale_mode=rescale_mode, forward_backward=forward_backward, sorted_eigs=sorted_eigs, tikhonov_regularization=tikhonov_regularization, ) self._Atilde = None def compute_operator(self, compressedX, compressedY, nonCompressedY): """ Compute the low-rank operator. :param numpy.ndarray compressedX: the compressed version of the matrix containing the snapshots x0,..x{n-1} by column. :param numpy.ndarray compressedY: the compressed version of the matrix containing the snapshots x1,..x{n} by column. :param numpy.ndarray nonCompressedY: the matrix containing the snapshots x1,..x{n} by column. :return: the (truncated) left-singular vectors matrix, the (truncated) singular values array, the (truncated) right-singular vectors matrix of compressedX. :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray """ U, s, V = compute_svd(compressedX, svd_rank=self._svd_rank) atilde = self._least_square_operator(U, s, V, compressedY) if self._forward_backward: # b stands for "backward" bU, bs, bV = compute_svd(compressedY, svd_rank=self._svd_rank) atilde_back = self._least_square_operator(bU, bs, bV, compressedX) atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back))) self._Atilde = atilde self._compute_eigenquantities() self._compute_modes(nonCompressedY, U, s, V) return U, s, V
class CDMD(DMDBase):
0
2023-10-30 12:37:40+00:00
16k
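The CDMDOperator.compute_operator method above builds the low-rank operator Atilde from compressed snapshot matrices but recovers the high-dimensional modes from the uncompressed Y (exact-DMD style). A minimal numpy sketch of that compressed-DMD flow follows; the Gaussian measurement matrix and synthetic rank-3 data are illustrative assumptions, not WeiDMD defaults.

# Sketch only: compressed-DMD idea on synthetic data.
import numpy as np

rng = np.random.default_rng(0)
n, m, r, p = 200, 50, 3, 20        # state dim, snapshots, true rank, compressed dim

# Synthetic rank-r snapshot matrices: X holds x_0..x_{m-2}, Y holds x_1..x_{m-1}.
data = rng.standard_normal((n, r)) @ rng.standard_normal((r, m))
X, Y = data[:, :-1], data[:, 1:]

C = rng.standard_normal((p, n)) / np.sqrt(p)   # random measurement (compression) matrix
cX, cY = C @ X, C @ Y

U, s, Vh = np.linalg.svd(cX, full_matrices=False)
U, s, V = U[:, :r], s[:r], Vh.conj().T[:, :r]
Atilde = U.conj().T @ cY @ V @ np.diag(1.0 / s)   # low-rank operator from compressed data

eigvals, W = np.linalg.eig(Atilde)
Phi = Y @ V @ np.diag(1.0 / s) @ W                # "exact" modes from the uncompressed Y

print(np.round(eigvals, 3))   # eigenvalues of the low-rank operator
print(Phi.shape)              # (n, r): full-dimensional DMD modes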
lewandofskee/DiAD
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True,timesteps=1000):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n x_T=None,\n timesteps=1000,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n timesteps=timesteps,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0])\n # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps)\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % 500 == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "cal_anomaly_map", "path": "utils/util.py", "snippet": "def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):\n if amap_mode == 'mul':\n anomaly_map = np.ones([out_size, out_size])\n else:\n anomaly_map = np.zeros([out_size, out_size])\n a_map_list = []\n for i in range(len(ft_list)):\n fs = fs_list[i]\n ft = ft_list[i]\n #fs_norm = F.normalize(fs, p=2)\n #ft_norm = F.normalize(ft, p=2)\n a_map = 1 - F.cosine_similarity(fs, ft)\n a_map = torch.unsqueeze(a_map, dim=1)\n a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)\n a_map = a_map[0, 0, :, :].to('cpu').detach().numpy()\n a_map_list.append(a_map)\n if amap_mode == 'mul':\n anomaly_map *= a_map\n else:\n anomaly_map += a_map\n return anomaly_map, a_map_list" }, { "identifier": "log_local", "path": "utils/util.py", "snippet": "def log_local(images, filenames):\n pixel_mean = [0.485, 0.456, 0.406]\n pixel_std = [0.229, 0.224, 0.225]\n pixel_mean = torch.tensor(pixel_mean).cuda().unsqueeze(1).unsqueeze(1) # 3 x 1 x 1\n pixel_std = torch.tensor(pixel_std).cuda().unsqueeze(1).unsqueeze(1)\n root = os.path.join('log_image/')\n name = filenames[-7:-4]\n for k in images:\n image = (images[k].squeeze() * pixel_std + pixel_mean) * 255\n image = image.permute(1, 2, 0).to('cpu').numpy()\n filename = \"{}-{}.jpg\".format(name, k)\n path = os.path.join(root, filenames[:-7],filename)\n os.makedirs(os.path.split(path)[0], exist_ok=True)\n # Image.fromarray(image).save(path)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(path, image)" }, { "identifier": "create_logger", "path": "utils/util.py", "snippet": "def create_logger(name, log_file, level=logging.INFO):\n log = logging.getLogger(name)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s\"\n )\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n log.setLevel(level)\n log.addHandler(fh)\n log.addHandler(sh)\n return log" }, { "identifier": "dump", "path": "utils/eval_helper.py", "snippet": "def dump(save_dir, outputs):\n filenames = outputs[\"filename\"]\n batch_size = len(filenames)\n preds = outputs[\"pred\"].cpu().numpy() # B x 1 x H x W\n masks = outputs[\"mask\"].cpu().numpy() # B x 1 x H x W\n # heights = outputs[\"height\"].cpu().numpy()\n # widths = outputs[\"width\"].cpu().numpy()\n clsnames = outputs[\"clsname\"]\n for i in range(batch_size):\n file_dir, filename = os.path.split(filenames[i])\n _, subname = os.path.split(file_dir)\n filename = \"{}_{}_{}\".format(clsnames[i], subname, filename)\n filename, _ = os.path.splitext(filename)\n save_file = os.path.join(save_dir, filename + \".npz\")\n np.savez(\n save_file,\n filename=filenames[i],\n pred=preds[i],\n mask=masks[i],\n # height=heights[i],\n # width=widths[i],\n clsname=clsnames[i],\n )" }, { "identifier": "log_metrics", "path": "utils/eval_helper.py", "snippet": "def log_metrics(ret_metrics, config):\n logger = logging.getLogger(\"global_logger\")\n clsnames = set([k.rsplit(\"_\", 2)[0] for k in ret_metrics.keys()])\n clsnames = list(clsnames - set([\"mean\"])) + [\"mean\"]\n\n # auc\n if config.get(\"auc\", None):\n 
auc_keys = [k for k in ret_metrics.keys() if \"auc\" in k]\n evalnames = list(set([k.rsplit(\"_\", 2)[1] for k in auc_keys]))\n record = Report([\"clsname\"] + evalnames)\n\n for clsname in clsnames:\n clsvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for evalname in evalnames\n ]\n record.add_one_record([clsname] + clsvalues)\n\n logger.info(f\"\\n{record}\")" }, { "identifier": "merge_together", "path": "utils/eval_helper.py", "snippet": "def merge_together(save_dir):\n npz_file_list = glob.glob(os.path.join(save_dir, \"*.npz\"))\n fileinfos = []\n preds = []\n masks = []\n for npz_file in npz_file_list:\n npz = np.load(npz_file)\n fileinfos.append(\n {\n \"filename\": str(npz[\"filename\"]),\n # \"height\": npz[\"height\"],\n # \"width\": npz[\"width\"],\n \"clsname\": str(npz[\"clsname\"]),\n }\n )\n preds.append(npz[\"pred\"])\n masks.append(npz[\"mask\"])\n preds = np.concatenate(np.asarray(preds), axis=0) # N x H x W\n masks = np.concatenate(np.asarray(masks), axis=0) # N x H x W\n return fileinfos, preds, masks" }, { "identifier": "performances", "path": "utils/eval_helper.py", "snippet": "def performances(fileinfos, preds, masks, config):\n ret_metrics = {}\n clsnames = set([fileinfo[\"clsname\"] for fileinfo in fileinfos])\n for clsname in clsnames:\n preds_cls = []\n masks_cls = []\n file_cls = []\n for fileinfo, pred, mask in zip(fileinfos, preds, masks):\n if fileinfo[\"clsname\"] == clsname:\n preds_cls.append(pred[None, ...])\n masks_cls.append(mask[None, ...])\n file_cls.append(fileinfo['filename'])\n preds_cls = np.concatenate(np.asarray(preds_cls), axis=0) # N x H x W\n masks_cls = np.concatenate(np.asarray(masks_cls), axis=0) # N x H x W\n data_meta = EvalDataMeta(preds_cls, masks_cls, file_cls)\n\n # auc\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n kwargs = metric.get(\"kwargs\", {})\n eval_method = eval_lookup_table[evalname](data_meta, **kwargs)\n auc = eval_method.eval_auc()\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)] = auc\n\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n evalvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for clsname in clsnames\n ]\n mean_auc = np.mean(np.array(evalvalues))\n ret_metrics[\"{}_{}_auc\".format(\"mean\", evalname)] = mean_auc\n\n return ret_metrics" } ]
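The context snippets above quote several small diffusion utilities, including extract_into_tensor, which gathers one schedule coefficient per batch element and reshapes it so it broadcasts over an image tensor. The standalone sketch below is an illustration only (the toy schedule, shapes, and timestep values are assumptions, not values from this record); it reuses the same gather-and-reshape logic as the quoted snippet.

# Illustration of how extract_into_tensor (quoted in the context list above)
# broadcasts per-timestep coefficients over a batch of images.
import torch

def extract_into_tensor(a, t, x_shape):
    # same logic as the quoted snippet: one coefficient per batch element,
    # reshaped to (B, 1, 1, ...) for broadcasting
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

sqrt_alphas_cumprod = torch.linspace(1.0, 0.1, 1000)  # toy schedule (assumption)
x0 = torch.randn(4, 3, 64, 64)                        # toy image batch (assumption)
t = torch.randint(0, 1000, (4,))                      # one timestep index per sample
coef = extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape)
print(coef.shape)  # torch.Size([4, 1, 1, 1]), ready to broadcast against x0

This is why the schedule buffers registered by the DDPM code below can be indexed directly with a batch of timestep indices.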
import torch
import os
import logging
import timm
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from scipy.ndimage import gaussian_filter
from utils.util import cal_anomaly_map, log_local, create_logger
from utils.eval_helper import dump, log_metrics, merge_together, performances
14186
@torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
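The cropped code above relies on the eps-parameterization identity behind predict_start_from_noise: the forward process produces x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps, and sqrt(1/abar_t) * x_t - sqrt(1/abar_t - 1) * eps recovers x0 exactly. The minimal numerical check below is an illustration; the toy beta schedule, timestep, and tensor shapes are assumptions, not values taken from this record.

# Check of the identity used by predict_start_from_noise above.
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)              # toy linear schedule (assumption)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
abar = alphas_cumprod[250]                            # arbitrary timestep (assumption)

x0 = torch.randn(2, 3, 8, 8)
eps = torch.randn_like(x0)
x_t = abar.sqrt() * x0 + (1 - abar).sqrt() * eps      # forward diffusion q(x_t | x_0)

# mirrors the sqrt_recip_alphas_cumprod / sqrt_recipm1_alphas_cumprod buffers
x0_rec = (1.0 / abar).sqrt() * x_t - (1.0 / abar - 1).sqrt() * eps
print(torch.allclose(x0_rec, x0, atol=1e-5))          # True up to float32 error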
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
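The target line above draws the noise for the reverse step. For context, CompVis-style DDPM implementations usually finish p_sample by masking out the noise at t == 0 and scaling it by the predicted log-variance. The sketch below shows that common pattern under those assumptions; the helper name finish_p_sample is hypothetical, and nothing beyond the noise_like line is claimed to be this record's ground-truth continuation.

# Sketch of the typical tail of p_sample in CompVis-style DDPM code
# (illustration only; only the noise_like line is this record's target).
import torch

def noise_like(shape, device, repeat=False):
    # same behavior as the noise_like snippet quoted in the context list
    if repeat:
        return torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)

def finish_p_sample(model_mean, model_log_variance, x, t, repeat_noise=False):
    # add Gaussian noise scaled by exp(0.5 * log_variance); no noise at t == 0,
    # so the final denoising step is deterministic
    b, device = x.shape[0], x.device
    noise = noise_like(x.shape, device, repeat_noise)
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise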
15
2023-10-30 14:21:09+00:00
16k
nv-tlabs/trace
tbsim/evaluation/env_builders.py
[ { "identifier": "UnifiedDataset", "path": "trajdata/src/trajdata/dataset.py", "snippet": "class UnifiedDataset(Dataset):\n # @profile\n def __init__(\n self,\n desired_data: List[str],\n scene_description_contains: Optional[List[str]] = None,\n centric: str = \"agent\",\n desired_dt: Optional[float] = None,\n history_sec: Tuple[Optional[float], Optional[float]] = (\n None,\n None,\n ), # Both inclusive\n future_sec: Tuple[Optional[float], Optional[float]] = (\n None,\n None,\n ), # Both inclusive\n agent_interaction_distances: Dict[\n Tuple[AgentType, AgentType], float\n ] = defaultdict(lambda: np.inf),\n incl_robot_future: bool = False,\n incl_map: bool = False,\n map_params: Optional[Dict[str, int]] = None,\n only_types: Optional[List[AgentType]] = None,\n only_predict: Optional[List[AgentType]] = None,\n no_types: Optional[List[AgentType]] = None,\n standardize_data: bool = True,\n standardize_derivatives: bool = False,\n augmentations: Optional[List[Augmentation]] = None,\n max_agent_num: Optional[int] = None,\n data_dirs: Dict[str, str] = {\n # \"nusc_trainval\": \"~/datasets/nuScenes\",\n # \"nusc_test\": \"~/datasets/nuScenes\",\n \"eupeds_eth\": \"~/datasets/eth_ucy_peds\",\n \"eupeds_hotel\": \"~/datasets/eth_ucy_peds\",\n \"eupeds_univ\": \"~/datasets/eth_ucy_peds\",\n \"eupeds_zara1\": \"~/datasets/eth_ucy_peds\",\n \"eupeds_zara2\": \"~/datasets/eth_ucy_peds\",\n \"nusc_mini\": \"~/datasets/nuScenes\",\n \"lyft_sample\": \"~/datasets/lyft/scenes/sample.zarr\",\n # \"lyft_train\": \"~/datasets/lyft/scenes/train.zarr\",\n # \"lyft_train_full\": \"~/datasets/lyft/scenes/train_full.zarr\",\n # \"lyft_val\": \"~/datasets/lyft/scenes/validate.zarr\",\n },\n cache_type: str = \"dataframe\",\n cache_location: str = \"~/.unified_data_cache\",\n rebuild_cache: bool = False,\n rebuild_maps: bool = False,\n num_workers: int = 0,\n verbose: bool = False,\n extras: Dict[str, Callable[..., np.ndarray]] = dict(),\n ) -> None:\n \"\"\"Instantiates a PyTorch Dataset object which aggregates data\n from multiple trajectory forecasting datasets.\n\n Args:\n desired_data (List[str]): Names of datasets, splits, scene tags, etc. See the README for more information.\n scene_description_contains (Optional[List[str]], optional): Only return data from scenes whose descriptions contain one or more of these strings. Defaults to None.\n centric (str, optional): One of {\"agent\", \"scene\"}, specifies what a batch element contains data for (one agent at one timestep or all agents in a scene at one timestep). Defaults to \"agent\".\n desired_dt (Optional[float], optional): Specifies the desired data sampling rate, an error will be raised if the original and desired data sampling rate are not integer multiples of each other. Defaults to None.\n history_sec (Tuple[Optional[float], Optional[float]], optional): A tuple containing (the minimum seconds of history each batch element must contain, the maximum seconds of history to return). Both inclusive. Defaults to ( None, None, ).\n future_sec (Tuple[Optional[float], Optional[float]], optional): A tuple containing (the minimum seconds of future data each batch element must contain, the maximum seconds of future data to return). Both inclusive. Defaults to ( None, None, ).\n agent_interaction_distances: (Dict[Tuple[AgentType, AgentType], float]): A dictionary mapping agent-agent interaction distances in meters (determines which agents are included as neighbors to the predicted agent). 
Defaults to infinity for all types.\n incl_robot_future (bool, optional): Include the ego agent's future trajectory in batches (accordingly, never predict the ego's future). Defaults to False.\n incl_map (bool, optional): Include a local cropping of the rasterized map (if the dataset provides a map) per agent. Defaults to False.\n map_params (Optional[Dict[str, int]], optional): Local map cropping parameters, must be specified if incl_map is True. Must contain keys {\"px_per_m\", \"map_size_px\"} and can optionally contain {\"offset_frac_xy\"}. Defaults to None.\n only_types (Optional[List[AgentType]], optional): Filter out all agents EXCEPT for those of the specified types. Defaults to None.\n only_predict (Optional[List[AgentType]], optional): Only predict the specified types of agents. Importantly, this keeps other agent types in the scene, e.g., as neighbors of the agent to be predicted. Defaults to None.\n no_types (Optional[List[AgentType]], optional): Filter out all agents with the specified types. Defaults to None.\n standardize_data (bool, optional): Standardize all data such that (1) the predicted agent's orientation at the current timestep is 0, (2) all data is made relative to the predicted agent's current position, and (3) the agent's heading value is replaced with its sin, cos values. Defaults to True.\n standardize_derivatives (bool, optional): Make agent velocities and accelerations relative to the agent being predicted. Defaults to False.\n augmentations (Optional[List[Augmentation]], optional): Perform the specified augmentations to the batch or dataset. Defaults to None.\n max_agent_num (int, optional): The maximum number of agents to include in a batch for scene-centric batching.\n data_dirs (Optional[Dict[str, str]], optional): Dictionary mapping dataset names to their directories on disk. Defaults to { \"eupeds_eth\": \"~/datasets/eth_ucy_peds\", \"eupeds_hotel\": \"~/datasets/eth_ucy_peds\", \"eupeds_univ\": \"~/datasets/eth_ucy_peds\", \"eupeds_zara1\": \"~/datasets/eth_ucy_peds\", \"eupeds_zara2\": \"~/datasets/eth_ucy_peds\", \"nusc_mini\": \"~/datasets/nuScenes\", \"lyft_sample\": \"~/datasets/lyft/scenes/sample.zarr\", }.\n cache_type (str, optional): What type of cache to use to store preprocessed, cached data on disk. Defaults to \"dataframe\".\n cache_location (str, optional): Where to store and load preprocessed, cached data. Defaults to \"~/.unified_data_cache\".\n rebuild_cache (bool, optional): If True, process and cache trajectory data even if it is already cached. Defaults to False.\n rebuild_maps (bool, optional): If True, process and cache maps even if they are already cached. Defaults to False.\n num_workers (int, optional): Number of parallel workers to use for dataset preprocessing and loading. Defaults to 0.\n verbose (bool, optional): If True, print internal data loading information. Defaults to False.\n extras (Dict[str, Callable[..., np.ndarray]], optional): Adds extra data to each batch element. 
Each Callable must take as input a filled {Agent,Scene}BatchElement and return an ndarray which will subsequently be added to the batch element's `extra` dict.\n \"\"\"\n self.centric: str = centric\n self.desired_dt: float = desired_dt\n\n if cache_type == \"dataframe\":\n self.cache_class = DataFrameCache\n\n self.rebuild_cache: bool = rebuild_cache\n self.cache_path: Path = Path(cache_location).expanduser().resolve()\n self.cache_path.mkdir(parents=True, exist_ok=True)\n self.env_cache: EnvCache = EnvCache(self.cache_path)\n\n if incl_map:\n assert (\n map_params is not None\n ), r\"Path size information, i.e., {'px_per_m': ..., 'map_size_px': ...}, must be provided if incl_map=True\"\n assert (\n map_params[\"map_size_px\"] % 2 == 0\n ), \"Patch parameter 'map_size_px' must be divisible by 2\"\n\n self.history_sec = history_sec\n self.future_sec = future_sec\n self.agent_interaction_distances = agent_interaction_distances\n self.incl_robot_future = incl_robot_future\n self.incl_map = incl_map\n self.map_params = (\n map_params if map_params is not None else {\"px_per_m\": DEFAULT_PX_PER_M}\n )\n self.only_types = None if only_types is None else set(only_types)\n self.only_predict = None if only_predict is None else set(only_predict)\n self.no_types = None if no_types is None else set(no_types)\n self.standardize_data = standardize_data\n self.standardize_derivatives = standardize_derivatives\n self.augmentations = augmentations\n self.extras = extras\n self.verbose = verbose\n self.max_agent_num = max_agent_num\n\n # Ensuring scene description queries are all lowercase\n if scene_description_contains is not None:\n scene_description_contains = [s.lower() for s in scene_description_contains]\n\n self.envs: List[RawDataset] = env_utils.get_raw_datasets(data_dirs)\n self.envs_dict: Dict[str, RawDataset] = {env.name: env for env in self.envs}\n\n matching_datasets: List[SceneTag] = self.get_matching_scene_tags(desired_data)\n if self.verbose:\n print(\n \"Loading data for matched scene tags:\",\n string_utils.pretty_string_tags(matching_datasets),\n flush=True,\n )\n\n all_scenes_list: Union[List[SceneMetadata], List[Scene]] = list()\n for env in self.envs:\n if any(env.name in dataset_tuple for dataset_tuple in matching_datasets):\n all_data_cached: bool = False\n all_maps_cached: bool = not env.has_maps or not self.incl_map\n if self.env_cache.env_is_cached(env.name) and not self.rebuild_cache:\n scenes_list: List[Scene] = self.get_desired_scenes_from_env(\n matching_datasets, scene_description_contains, env\n )\n\n all_data_cached: bool = all(\n self.env_cache.scene_is_cached(\n scene.env_name, scene.name, scene.dt\n )\n for scene in scenes_list\n )\n\n all_maps_cached: bool = (\n not env.has_maps\n or not self.incl_map\n or all(\n self.cache_class.is_map_cached(\n self.cache_path,\n env.name,\n scene.location,\n self.map_params[\"px_per_m\"],\n )\n for scene in scenes_list\n )\n )\n\n if (\n not all_data_cached\n or not all_maps_cached\n or self.rebuild_cache\n or rebuild_maps\n ):\n # Loading dataset objects in case we don't have\n # the desired data already cached.\n env.load_dataset_obj(verbose=self.verbose)\n\n if (\n rebuild_maps\n or not all_maps_cached\n or not self.cache_class.are_maps_cached(\n self.cache_path, env.name\n )\n ):\n env.cache_maps(\n self.cache_path,\n self.cache_class,\n resolution=self.map_params[\"px_per_m\"],\n )\n\n scenes_list: List[SceneMetadata] = self.get_desired_scenes_from_env(\n matching_datasets, scene_description_contains, env\n )\n\n 
all_scenes_list += scenes_list\n\n # List of cached scene paths.\n scene_paths: List[Path] = self.preprocess_scene_data(\n all_scenes_list, num_workers\n )\n if self.verbose:\n print(len(scene_paths), \"scenes in the scene index.\")\n\n # Done with this list. Cutting memory usage because\n # of multiprocessing later on.\n del all_scenes_list\n\n data_index: Union[\n List[Tuple[str, int, np.ndarray]],\n List[Tuple[str, int, List[Tuple[str, np.ndarray]]]],\n ] = self.get_data_index(num_workers, scene_paths)\n\n # Done with this list. Cutting memory usage because\n # of multiprocessing later on.\n del scene_paths\n\n self._scene_index: List[Path] = [orig_path for orig_path, _, _ in data_index]\n\n # The data index is effectively a big list of tuples taking the form:\n # (scene_path: str, index_len: int, valid_timesteps: np.ndarray[, agent_name: str])\n self._data_index: DataIndex = (\n AgentDataIndex(data_index, self.verbose)\n if self.centric == \"agent\"\n else SceneDataIndex(data_index, self.verbose)\n )\n self._data_len: int = len(self._data_index)\n\n def get_data_index(\n self, num_workers: int, scene_paths: List[Path]\n ) -> Union[\n List[Tuple[str, int, np.ndarray]],\n List[Tuple[str, int, List[Tuple[str, np.ndarray]]]],\n ]:\n # We're doing all this staticmethod malarkey so that multiprocessing\n # doesn't copy the UnifiedDataset self object (which generally slows down the\n # rate of spinning up new processes and hogs memory).\n desc: str = f\"Creating {self.centric.capitalize()} Data Index\"\n\n if self.centric == \"scene\":\n data_index_fn = partial(\n UnifiedDataset._get_data_index_scene,\n only_types=self.only_types,\n no_types=self.no_types,\n history_sec=self.history_sec,\n future_sec=self.future_sec,\n desired_dt=self.desired_dt,\n )\n elif self.centric == \"agent\":\n data_index_fn = partial(\n UnifiedDataset._get_data_index_agent,\n incl_robot_future=self.incl_robot_future,\n only_types=self.only_types,\n only_predict=self.only_predict,\n no_types=self.no_types,\n history_sec=self.history_sec,\n future_sec=self.future_sec,\n desired_dt=self.desired_dt,\n )\n\n # data_index is either:\n # [(scene_path, total_index_len, valid_scene_ts)] for scene-centric data, or\n # [(scene_path, total_index_len, [(agent_name, valid_agent_ts)])] for agent-centric data\n data_index: Union[\n List[Tuple[str, int, np.ndarray]],\n List[Tuple[str, int, List[Tuple[str, np.ndarray]]]],\n ] = list()\n if num_workers <= 1:\n for scene_info_path in tqdm(\n scene_paths,\n desc=desc + \" (Serially)\",\n disable=not self.verbose,\n ):\n _, orig_path, index_elems_len, index_elems = data_index_fn(\n scene_info_path\n )\n if len(index_elems) > 0:\n data_index.append((str(orig_path), index_elems_len, index_elems))\n else:\n for (_, orig_path, index_elems_len, index_elems) in parallel_iapply(\n data_index_fn,\n scene_paths,\n num_workers=num_workers,\n desc=desc + f\" ({num_workers} CPUs)\",\n disable=not self.verbose,\n ):\n if len(index_elems) > 0:\n data_index.append((str(orig_path), index_elems_len, index_elems))\n\n return data_index\n\n @staticmethod\n def _get_data_index_scene(\n scene_info_path: Path,\n only_types: Optional[Set[AgentType]],\n no_types: Optional[Set[AgentType]],\n history_sec: Tuple[Optional[float], Optional[float]],\n future_sec: Tuple[Optional[float], Optional[float]],\n desired_dt: Optional[float],\n ret_scene_info: bool = False,\n ) -> Tuple[Optional[Scene], Path, int, np.ndarray]:\n index_elems: List[int] = list()\n\n scene: Scene = EnvCache.load(scene_info_path)\n 
scene_utils.enforce_desired_dt(scene, desired_dt)\n\n for ts in range(scene.length_timesteps):\n # This is where we remove scene timesteps that would have no remaining agents after filtering.\n if filtering.all_agents_excluded_types(no_types, scene.agent_presence[ts]):\n continue\n elif filtering.no_agent_included_types(\n only_types, scene.agent_presence[ts]\n ):\n continue\n\n if filtering.no_agent_satisfies_time(\n ts,\n scene.dt,\n history_sec,\n future_sec,\n scene.agent_presence[ts],\n ):\n # Ignore this datum if no agent in the scene satisfies our time requirements.\n continue\n\n index_elems.append(ts)\n\n return (\n (scene if ret_scene_info else None),\n scene_info_path,\n len(index_elems),\n np.array(index_elems, dtype=np.int),\n )\n\n @staticmethod\n def _get_data_index_agent(\n scene_info_path: Path,\n incl_robot_future: bool,\n only_types: Optional[Set[AgentType]],\n only_predict: Optional[Set[AgentType]],\n no_types: Optional[Set[AgentType]],\n history_sec: Tuple[Optional[float], Optional[float]],\n future_sec: Tuple[Optional[float], Optional[float]],\n desired_dt: Optional[float],\n ret_scene_info: bool = False,\n ) -> Tuple[Optional[Scene], Path, int, List[Tuple[str, np.ndarray]]]:\n index_elems_len: int = 0\n index_elems: List[Tuple[str, np.ndarray]] = list()\n\n scene: Scene = EnvCache.load(scene_info_path)\n scene_utils.enforce_desired_dt(scene, desired_dt)\n\n filtered_agents: List[AgentMetadata] = filtering.agent_types(\n scene.agents,\n no_types,\n only_predict if only_predict is not None else only_types,\n )\n\n for agent_info in filtered_agents:\n # Don't want to predict the ego if we're going to be giving the model its future!\n if incl_robot_future and agent_info.name == \"ego\":\n continue\n\n valid_ts: Tuple[int, int] = filtering.get_valid_ts(\n agent_info, scene.dt, history_sec, future_sec\n )\n\n num_agent_ts: int = valid_ts[1] - valid_ts[0] + 1\n if num_agent_ts > 0:\n index_elems_len += num_agent_ts\n index_elems.append((agent_info.name, np.array(valid_ts, dtype=np.int)))\n\n return (\n (scene if ret_scene_info else None),\n scene_info_path,\n index_elems_len,\n index_elems,\n )\n\n def get_collate_fn(self, return_dict: bool = False) -> Callable:\n batch_augments: Optional[List[BatchAugmentation]] = None\n if self.augmentations:\n batch_augments = [\n batch_aug\n for batch_aug in self.augmentations\n if isinstance(batch_aug, BatchAugmentation)\n ]\n\n if self.centric == \"agent\":\n collate_fn = partial(\n agent_collate_fn, return_dict=return_dict, batch_augments=batch_augments\n )\n elif self.centric == \"scene\":\n collate_fn = partial(\n scene_collate_fn,\n return_dict=return_dict,\n batch_augments=batch_augments,\n )\n\n return collate_fn\n\n def get_matching_scene_tags(self, queries: List[str]) -> List[SceneTag]:\n # if queries is None:\n # return list(chain.from_iterable(env.components for env in self.envs))\n\n query_tuples = [set(data.split(\"-\")) for data in queries]\n\n matching_scene_tags: List[SceneTag] = list()\n for query_tuple in query_tuples:\n for env in self.envs:\n matching_scene_tags += env.get_matching_scene_tags(query_tuple)\n\n return matching_scene_tags\n\n def get_desired_scenes_from_env(\n self,\n scene_tags: List[SceneTag],\n scene_description_contains: Optional[List[str]],\n env: RawDataset,\n ) -> Union[List[Scene], List[SceneMetadata]]:\n scenes_list: Union[List[Scene], List[SceneMetadata]] = list()\n for scene_tag in scene_tags:\n if env.name in scene_tag:\n scenes_list += env.get_matching_scenes(\n scene_tag,\n 
scene_description_contains,\n self.env_cache,\n self.rebuild_cache,\n )\n\n return scenes_list\n\n def preprocess_scene_data(\n self,\n scenes_list: Union[List[SceneMetadata], List[Scene]],\n num_workers: int,\n ) -> List[Path]:\n all_cached: bool = not self.rebuild_cache and all(\n self.env_cache.scene_is_cached(\n scene_info.env_name,\n scene_info.name,\n self.desired_dt if self.desired_dt is not None else scene_info.dt,\n )\n for scene_info in scenes_list\n )\n\n serial_scenes: List[SceneMetadata]\n parallel_scenes: List[SceneMetadata]\n if num_workers > 1 and not all_cached:\n serial_scenes = [\n scene_info\n for scene_info in scenes_list\n if not self.envs_dict[scene_info.env_name].parallelizable\n ]\n parallel_scenes = [\n scene_info\n for scene_info in scenes_list\n if self.envs_dict[scene_info.env_name].parallelizable\n ]\n else:\n serial_scenes = scenes_list\n parallel_scenes = list()\n\n # List of (Original cached path, Temporary cached path)\n scene_paths: List[Path] = list()\n if serial_scenes:\n # Scenes for which it's faster to process them serially. See\n # the longer comment below for a more thorough explanation.\n scene_info: SceneMetadata\n for scene_info in tqdm(\n serial_scenes,\n desc=\"Calculating Agent Data (Serially)\",\n disable=not self.verbose,\n ):\n scene_dt: float = (\n self.desired_dt if self.desired_dt is not None else scene_info.dt\n )\n if self.env_cache.scene_is_cached(\n scene_info.env_name, scene_info.name, scene_dt\n ):\n # This is a fast path in case we don't need to\n # perform any modifications to the scene_info.\n scene_path: Path = EnvCache.scene_metadata_path(\n self.cache_path,\n scene_info.env_name,\n scene_info.name,\n scene_dt,\n )\n\n scene_paths.append(scene_path)\n continue\n\n corresponding_env: RawDataset = self.envs_dict[scene_info.env_name]\n scene: Scene = agent_utils.get_agent_data(\n scene_info,\n corresponding_env,\n self.env_cache,\n self.rebuild_cache,\n self.cache_class,\n self.desired_dt,\n )\n\n scene_path: Path = EnvCache.scene_metadata_path(\n self.cache_path, scene.env_name, scene.name, scene.dt\n )\n scene_paths.append(scene_path)\n\n # Done with these lists. 
Cutting memory usage because\n # of multiprocessing below.\n del serial_scenes\n scenes_list.clear()\n\n # No more need for the original dataset objects and freeing up\n # this memory allows the parallel processing below to run very fast.\n # The dataset objects for any envs used below will be loaded in each\n # process.\n for env in self.envs:\n env.del_dataset_obj()\n\n # Scenes for which it's faster to process them in parallel\n # Note this really only applies to scenes whose raw datasets\n # are \"parallelizable\" AKA take up a small amount of memory\n # and effectively act as a window into the data on disk.\n # E.g., NuScenes objects load a lot of data into RAM, so\n # they are not parallelizable and should be processed\n # serially after loading the dataset object once\n # (thankfully it is quite fast to do so).\n if parallel_scenes:\n # Here we're using PyTorch's parallel dataloading as a\n # general parallel processing interface (it uses all the same\n # multiprocessing package under the hood anyways, but it has\n # some good logic for keeping workers occupied which seems\n # like it'd be good to reuse).\n parallel_preprocessor = ParallelDatasetPreprocessor(\n parallel_scenes,\n {\n env_name: str(env.metadata.data_dir)\n for env_name, env in self.envs_dict.items()\n },\n str(self.env_cache.path),\n self.desired_dt,\n self.cache_class,\n self.rebuild_cache,\n )\n\n # Done with this list. Cutting memory usage because\n # of multiprocessing below.\n del parallel_scenes\n\n # This shouldn't be necessary, but sometimes old\n # (large) dataset objects haven't been garbage collected\n # by this time, causing memory usage to skyrocket during\n # parallel data preprocessing below.\n gc.collect()\n\n dataloader = DataLoader(\n parallel_preprocessor,\n batch_size=1,\n num_workers=num_workers,\n shuffle=False,\n collate_fn=scene_paths_collate_fn,\n )\n\n for processed_scene_paths in tqdm(\n dataloader,\n desc=f\"Calculating Agent Data ({num_workers} CPUs)\",\n disable=not self.verbose,\n ):\n scene_paths += [Path(path_str) for path_str in processed_scene_paths]\n return scene_paths\n\n def get_scene(self, scene_idx: int) -> Scene:\n scene: Scene = EnvCache.load(self._scene_index[scene_idx])\n scene_utils.enforce_desired_dt(scene, self.desired_dt)\n return scene\n\n def num_scenes(self) -> int:\n return len(self._scene_index)\n\n def scenes(self) -> Scene:\n for scene_idx in range(self.num_scenes()):\n yield self.get_scene(scene_idx)\n\n def __len__(self) -> int:\n return self._data_len\n\n def __getitem__(self, idx: int) -> Union[SceneBatchElement, AgentBatchElement]:\n if self.centric == \"scene\":\n scene_path, ts = self._data_index[idx]\n elif self.centric == \"agent\":\n scene_path, agent_id, ts = self._data_index[idx]\n\n scene: Scene = EnvCache.load(scene_path)\n scene_utils.enforce_desired_dt(scene, self.desired_dt)\n scene_cache: SceneCache = self.cache_class(\n self.cache_path, scene, ts, self.augmentations\n )\n\n if self.centric == \"scene\":\n scene_time: SceneTime = SceneTime.from_cache(\n scene,\n ts,\n scene_cache,\n only_types=self.only_types,\n no_types=self.no_types,\n )\n\n batch_element: SceneBatchElement = SceneBatchElement(\n scene_cache,\n idx,\n scene_time,\n self.history_sec,\n self.future_sec,\n self.agent_interaction_distances,\n self.incl_robot_future,\n self.incl_map,\n self.map_params,\n self.standardize_data,\n self.standardize_derivatives,\n self.max_agent_num,\n )\n elif self.centric == \"agent\":\n scene_time_agent: SceneTimeAgent = 
SceneTimeAgent.from_cache(\n scene,\n ts,\n agent_id,\n scene_cache,\n only_types=self.only_types,\n no_types=self.no_types,\n incl_robot_future=self.incl_robot_future,\n )\n\n batch_element: AgentBatchElement = AgentBatchElement(\n scene_cache,\n idx,\n scene_time_agent,\n self.history_sec,\n self.future_sec,\n self.agent_interaction_distances,\n self.incl_robot_future,\n self.incl_map,\n self.map_params,\n self.standardize_data,\n self.standardize_derivatives,\n )\n\n for key, extra_fn in self.extras.items():\n batch_element.extras[key] = extra_fn(batch_element)\n\n return batch_element" }, { "identifier": "EvaluationConfig", "path": "tbsim/configs/eval_config.py", "snippet": "class EvaluationConfig(Dict):\n def __init__(self):\n super(EvaluationConfig, self).__init__()\n self.name = None\n self.env = \"trajdata\"\n self.dataset_path = None\n self.eval_class = \"\"\n self.seed = 0\n self.num_scenes_per_batch = 1\n self.num_scenes_to_evaluate = 1\n\n self.num_episode_repeats = 1\n self.start_frame_index_each_episode = None # if specified, should be the same length as num_episode_repeats\n self.seed_each_episode = None # if specified, should be the same length as num_episode_repeats\n\n self.ego_only = False # needed for training rollout callback\n\n self.ckpt_root_dir = \"checkpoints/\"\n self.experience_hdf5_path = None\n self.results_dir = \"results/\"\n\n self.ckpt.policy.ckpt_dir = None\n self.ckpt.policy.ckpt_key = None\n\n self.policy.num_action_samples = 10\n\n self.metrics.compute_analytical_metrics = True\n\n self.trajdata.trajdata_cache_location = \"~/.unified_data_cache\"\n self.trajdata.trajdata_rebuild_cache = False\n\n #\n # eupeds\n # \n\n # self.trajdata.trajdata_source_test = [\"eupeds_eth-val\", \"eupeds_hotel-val\", \"eupeds_univ-val\", \"eupeds_zara1-val\", \"eupeds_zara2-val\"]\n # self.trajdata.trajdata_data_dirs = {\n # \"eupeds_eth\" : \"./datasets/eth_ucy\", \n # \"eupeds_hotel\" : \"./datasets/eth_ucy\",\n # \"eupeds_univ\" : \"./datasets/eth_ucy\",\n # \"eupeds_zara1\" : \"./datasets/eth_ucy\",\n # \"eupeds_zara2\" : \"./datasets/eth_ucy\"\n # }\n # self.trajdata.num_scenes_to_evaluate = 6\n # self.trajdata.eval_scenes = np.arange(6).tolist()\n # self.trajdata.n_step_action = 2\n # self.trajdata.num_simulation_steps = 25\n # self.trajdata.skip_first_n = 0\n\n #\n # orca\n #\n\n self.trajdata.trajdata_source_test = [\"orca_maps-test\"]\n self.trajdata.trajdata_data_dirs = {\n \"orca_maps\" : \"./datasets/orca_sim\",\n \"orca_no_maps\" : \"./datasets/orca_sim\",\n }\n self.trajdata.num_scenes_to_evaluate = 200\n self.trajdata.eval_scenes = np.arange(200).tolist()\n self.trajdata.n_step_action = 1 #5\n self.trajdata.num_simulation_steps = 100\n self.trajdata.skip_first_n = 0\n self.policy.num_action_samples = 10\n \n #\n # nusc\n #\n\n # self.trajdata.trajdata_source_test = [\"nusc_trainval-val\"]\n # self.trajdata.trajdata_data_dirs = {\n # \"nusc_trainval\" : \"./datasets/nuscenes\",\n # }\n # self.trajdata.num_scenes_to_evaluate = 100\n # self.trajdata.eval_scenes = np.arange(100).tolist()\n # self.trajdata.n_step_action = 5\n # self.trajdata.num_simulation_steps = 100\n # self.trajdata.skip_first_n = 0\n\n def clone(self):\n return deepcopy(self)" }, { "identifier": "ExperimentConfig", "path": "tbsim/configs/base.py", "snippet": "class ExperimentConfig(Dict):\n def __init__(\n self,\n train_config: TrainConfig,\n env_config: EnvConfig,\n algo_config: AlgoConfig,\n registered_name: str = None,\n ):\n \"\"\"\n\n Args:\n train_config (TrainConfig): training 
config\n env_config (EnvConfig): environment config\n algo_config (AlgoConfig): algorithm config\n registered_name (str): name of the experiment config object in the global config registry\n \"\"\"\n super(ExperimentConfig, self).__init__()\n self.registered_name = registered_name\n\n self.train = train_config\n self.env = env_config\n self.algo = algo_config\n\n # Write all results to this directory. A new folder with the timestamp will be created\n # in this directory, and it will contain three subfolders - \"log\", \"models\", and \"videos\".\n # The \"log\" directory will contain tensorboard and stdout txt logs. The \"models\" directory\n # will contain saved model checkpoints.\n self.name = (\n \"test\" # name of the experiment (creates a subdirectory under root_dir)\n )\n\n self.root_dir = \"{}_trained_models/\".format(self.algo.name)\n self.seed = 1 # seed for everything (for reproducibility)\n\n self.devices.num_gpus = 1 # Set to 0 to use CPU\n\n def clone(self):\n return self.__class__(\n train_config=deepcopy(self.train),\n env_config=deepcopy(self.env),\n algo_config=deepcopy(self.algo),\n registered_name=self.registered_name,\n )" }, { "identifier": "EnvUnifiedSimulation", "path": "tbsim/envs/env_trajdata.py", "snippet": "class EnvUnifiedSimulation(BaseEnv, BatchedEnv):\n def __init__(\n self,\n env_config,\n num_scenes,\n dataset: UnifiedDataset,\n seed=0,\n prediction_only=False,\n metrics=None,\n log_data=True,\n ):\n \"\"\"\n A gym-like interface for simulating traffic behaviors (both ego and other agents) with UnifiedDataset\n\n Args:\n env_config (EnvConfig): a Config object specifying the behavior of the simulator\n num_scenes (int): number of scenes to run in parallel\n dataset (UnifiedDataset): a UnifiedDataset instance that contains scene data for simulation\n prediction_only (bool): if set to True, ignore the input action command and only record the predictions\n \"\"\"\n print(env_config)\n self._npr = np.random.RandomState(seed=seed)\n self.dataset = dataset\n self._env_config = env_config\n\n self._num_total_scenes = dataset.num_scenes()\n self._num_scenes = num_scenes\n\n # indices of the scenes (in dataset) that are being used for simulation\n self._current_scenes: List[SimulationScene] = None # corresponding dataset of the scenes\n self._current_scene_indices = None\n\n self._frame_index = 0\n self._done = False\n self._prediction_only = prediction_only\n\n self._cached_observation = None\n self._cached_raw_observation = None\n\n self._metrics = dict() if metrics is None else metrics\n self._persistent_metrics = self._metrics\n self._log_data = log_data\n self.logger = None\n\n def update_random_seed(self, seed):\n self._npr = np.random.RandomState(seed=seed)\n\n @property\n def current_scene_names(self):\n return deepcopy([scene.scene_name for scene in self._current_scenes])\n\n @property\n def current_num_agents(self):\n return sum(len(scene.agents) for scene in self._current_scenes)\n\n def reset_multi_episodes_metrics(self):\n for v in self._metrics.values():\n v.multi_episode_reset()\n\n @property\n def current_agent_scene_index(self):\n si = []\n for scene_i, scene in zip(self.current_scene_index, self._current_scenes):\n si.extend([scene_i] * len(scene.agents))\n return np.array(si, dtype=np.int64)\n\n @property\n def current_agent_track_id(self):\n return np.arange(self.current_num_agents)\n\n @property\n def current_scene_index(self):\n return self._current_scene_indices.copy()\n\n @property\n def current_agent_names(self):\n names = []\n for scene in 
self._current_scenes:\n names.extend([a.name for a in scene.agents])\n return names\n\n @property\n def num_instances(self):\n return self._num_scenes\n\n @property\n def total_num_scenes(self):\n return self._num_total_scenes\n\n def is_done(self):\n return self._done\n\n def get_reward(self):\n # TODO\n return np.zeros(self._num_scenes)\n\n @property\n def horizon(self):\n return self._env_config.simulation.num_simulation_steps\n\n def _disable_offroad_agents(self, scene):\n obs = scene.get_obs()\n obs = parse_trajdata_batch(obs)\n if obs[\"maps\"] is not None:\n obs_maps = verify_map(obs[\"maps\"])\n drivable_region = get_drivable_region_map(obs_maps)\n raster_pos = transform_points_tensor(obs[\"centroid\"][:, None], obs[\"raster_from_world\"])[:, 0]\n valid_agents = []\n for i, rpos in enumerate(raster_pos):\n if scene.agents[i].name == \"ego\" or drivable_region[i, int(rpos[1]), int(rpos[0])].item() > 0:\n valid_agents.append(scene.agents[i])\n\n scene.agents = valid_agents\n \n def add_new_agents(self,agent_data_by_scene):\n for sim_scene,agent_data in agent_data_by_scene.items():\n if sim_scene not in self._current_scenes:\n continue\n if len(agent_data)>0:\n sim_scene.add_new_agents(agent_data)\n\n def reset(self, scene_indices: List = None, start_frame_index = None):\n \"\"\"\n Reset the previous simulation episode. Randomly sample a batch of new scenes unless specified in @scene_indices\n\n Args:\n scene_indices (List): Optional, a list of scene indices to initialize the simulation episode\n start_frame_index (int or list of ints) : either a single frame number or a list of starting frames corresponding to the given scene_indices\n \"\"\"\n if scene_indices is None:\n # randomly sample a batch of scenes for close-loop rollouts\n all_indices = np.arange(self._num_total_scenes)\n scene_indices = self._npr.choice(\n all_indices, size=(self.num_instances,), replace=False\n )\n\n scene_info = [self.dataset.get_scene(i) for i in scene_indices]\n\n self._num_scenes = len(scene_info)\n self._current_scene_indices = scene_indices\n\n assert (\n np.max(scene_indices) < self._num_total_scenes\n and np.min(scene_indices) >= 0\n )\n if start_frame_index is None:\n start_frame_index = self._env_config.simulation.start_frame_index\n self._current_scenes = []\n scenes_valid = []\n for i, si in enumerate(scene_info):\n try:\n cur_start_frame = start_frame_index[i] if isinstance(start_frame_index, list) else start_frame_index\n sim_scene: SimulationScene = SimulationScene(\n env_name=self._env_config.name,\n scene_name=si.name,\n scene=si,\n dataset=self.dataset,\n init_timestep=cur_start_frame,\n freeze_agents=True,\n return_dict=True\n )\n except Exception as e:\n print('Invalid scene %s..., skipping' % (si.name))\n print(e)\n scenes_valid.append(False)\n continue\n\n obs = sim_scene.reset()\n self._disable_offroad_agents(sim_scene)\n self._current_scenes.append(sim_scene)\n scenes_valid.append(True)\n\n self._frame_index = 0\n self._cached_observation = None\n self._cached_raw_observation = None\n self._done = False\n\n obs_keys_to_log = [\n \"centroid\",\n \"yaw\",\n \"extent\",\n \"world_from_agent\",\n \"scene_index\",\n \"track_id\"\n ]\n info_keys_to_log = [\n \"action_samples\",\n ]\n self.logger = RolloutLogger(obs_keys=obs_keys_to_log,\n info_keys=info_keys_to_log)\n\n for v in self._metrics.values():\n v.reset()\n\n return scenes_valid\n\n def render(self, actions_to_take):\n raise NotImplementedError('rendering not implemented for this env')\n\n def get_random_action(self):\n ac = 
self._npr.randn(self.current_num_agents, 1, 3)\n agents = Action(\n positions=ac[:, :, :2],\n yaws=ac[:, :, 2:3]\n )\n\n return RolloutAction(agents=agents)\n\n def get_info(self):\n info = dict(scene_index=self.current_scene_names)\n if self._log_data:\n sim_buffer = self.logger.get_serialized_scene_buffer()\n sim_buffer = [sim_buffer[k] for k in self.current_scene_index]\n info[\"buffer\"] = sim_buffer\n self.logger.get_trajectory()\n return info\n\n def get_multi_episode_metrics(self):\n metrics = dict()\n for met_name, met in self._metrics.items():\n met_vals = met.get_multi_episode_metrics()\n if isinstance(met_vals, dict):\n for k, v in met_vals.items():\n metrics[met_name + \"_\" + k] = v\n elif met_vals is not None:\n metrics[met_name] = met_vals\n return metrics\n\n def get_metrics(self):\n \"\"\"\n Get metrics of the current episode (may compute before is_done==True)\n\n Returns: a dictionary of metrics, each containing an array of measurement same length as the number of scenes\n \"\"\"\n metrics = dict()\n # get ADE and FDE from SimulationScene\n metrics[\"ade\"] = np.zeros(self.num_instances)\n metrics[\"fde\"] = np.zeros(self.num_instances)\n for i, scene in enumerate(self._current_scenes):\n mets_per_agent = scene.get_metrics([sim_metrics.ADE(), sim_metrics.FDE()])\n metrics[\"ade\"][i] = np.array(list(mets_per_agent[\"ade\"].values())).mean()\n metrics[\"fde\"][i] = np.array(list(mets_per_agent[\"fde\"].values())).mean()\n\n # aggregate per-step metrics\n for met_name, met in self._metrics.items():\n met_vals = met.get_episode_metrics()\n if isinstance(met_vals, dict):\n for k, v in met_vals.items():\n metrics[met_name + \"_\" + k] = v\n else:\n metrics[met_name] = met_vals\n\n for k in metrics:\n assert metrics[k].shape == (self.num_instances,)\n return metrics\n\n def get_observation_by_scene(self):\n obs = self.get_observation()[\"agents\"]\n obs_by_scene = []\n obs_scene_index = self.current_agent_scene_index\n for i in range(self.num_instances):\n obs_by_scene.append(TensorUtils.map_ndarray(obs, lambda x: x[obs_scene_index == i]))\n return obs_by_scene\n\n def get_observation(self):\n if self._cached_observation is not None:\n return self._cached_observation\n\n raw_obs = []\n for si, scene in enumerate(self._current_scenes):\n raw_obs.extend(scene.get_obs(collate=False))\n agent_obs = self.dataset.get_collate_fn(return_dict=True)(raw_obs)\n agent_obs = parse_trajdata_batch(agent_obs, overwrite_nan=False)\n agent_obs = TensorUtils.to_numpy(agent_obs)\n agent_obs[\"scene_index\"] = self.current_agent_scene_index\n agent_obs[\"track_id\"] = self.current_agent_track_id\n\n # corner case where no agents in the scene are visible up to full history.\n # so need to pad\n expected_hist_len = floor(self.dataset.history_sec[1] / self.dataset.desired_dt) + 1\n pad_len = expected_hist_len - agent_obs[\"history_positions\"].shape[1]\n if pad_len > 0:\n B = agent_obs[\"history_positions\"].shape[0]\n # pad with zeros and set to unavaible\n agent_obs[\"history_positions\"] = np.concatenate([np.zeros((B, pad_len, 2), dtype=agent_obs[\"history_positions\"].dtype), agent_obs[\"history_positions\"]], axis=1)\n agent_obs[\"history_yaws\"] = np.concatenate([np.zeros((B, pad_len, 1), dtype=agent_obs[\"history_yaws\"].dtype), agent_obs[\"history_yaws\"]], axis=1)\n agent_obs[\"history_speeds\"] = np.concatenate([np.zeros((B, pad_len), dtype=agent_obs[\"history_speeds\"].dtype), agent_obs[\"history_speeds\"]], axis=1)\n agent_obs[\"history_availabilities\"] = np.concatenate([np.zeros((B, 
pad_len), dtype=agent_obs[\"history_availabilities\"].dtype), agent_obs[\"history_availabilities\"]], axis=1)\n\n N = agent_obs[\"all_other_agents_history_positions\"].shape[1]\n agent_obs[\"all_other_agents_history_positions\"] = np.concatenate([np.zeros((B, N, pad_len, 2), dtype=agent_obs[\"all_other_agents_history_positions\"].dtype), agent_obs[\"all_other_agents_history_positions\"]], axis=2)\n agent_obs[\"all_other_agents_history_yaws\"] = np.concatenate([np.zeros((B, N, pad_len, 1), dtype=agent_obs[\"all_other_agents_history_yaws\"].dtype), agent_obs[\"all_other_agents_history_yaws\"]], axis=2)\n agent_obs[\"all_other_agents_history_speeds\"] = np.concatenate([np.zeros((B, N, pad_len), dtype=agent_obs[\"all_other_agents_history_speeds\"].dtype), agent_obs[\"all_other_agents_history_speeds\"]], axis=2)\n agent_obs[\"all_other_agents_history_availabilities\"] = np.concatenate([np.zeros((B, N, pad_len), dtype=agent_obs[\"all_other_agents_history_availabilities\"].dtype), agent_obs[\"all_other_agents_history_availabilities\"]], axis=2)\n agent_obs[\"all_other_agents_history_availability\"] = np.concatenate([np.zeros((B, N, pad_len), dtype=agent_obs[\"all_other_agents_history_availability\"].dtype), agent_obs[\"all_other_agents_history_availability\"]], axis=2)\n agent_obs[\"all_other_agents_history_extents\"] = np.concatenate([np.zeros((B, N, pad_len, 3), dtype=agent_obs[\"all_other_agents_history_extents\"].dtype), agent_obs[\"all_other_agents_history_extents\"]], axis=2)\n\n # cache observations\n self._cached_observation = dict(agents=agent_obs)\n\n return self._cached_observation\n\n\n def get_observation_skimp(self):\n raw_obs = []\n for si, scene in enumerate(self._current_scenes):\n raw_obs.extend(scene.get_obs(collate=False, get_map=False))\n agent_obs = self.dataset.get_collate_fn(return_dict=True)(raw_obs)\n agent_obs = parse_trajdata_batch(agent_obs, overwrite_nan=False)\n agent_obs = TensorUtils.to_numpy(agent_obs)\n agent_obs[\"scene_index\"] = self.current_agent_scene_index\n agent_obs[\"track_id\"] = self.current_agent_track_id\n return dict(agents=agent_obs)\n\n def _add_per_step_metrics(self, obs):\n for k, v in self._metrics.items():\n v.add_step(obs, self.current_scene_index)\n\n def _step(self, step_actions: RolloutAction, num_steps_to_take):\n if self.is_done():\n raise SimulationException(\"Cannot step in a finished episode\")\n\n obs = self.get_observation()[\"agents\"] \n\n action = step_actions.agents.to_dict()\n action_samples = None if \"action_samples\" not in step_actions.agents_info else step_actions.agents_info[\"action_samples\"]\n action_info = {k : v for k, v in step_actions.agents_info.items() if k != \"action_samples\"}\n for action_index in range(num_steps_to_take):\n if action_index >= action[\"positions\"].shape[1]: # GT actions may be shorter\n self._done = True\n self._frame_index += action_index\n self._cached_observation = None\n return\n\n # compute metrics\n # add map info from original observation so metrics like offroad can be computed\n # NOTE: this assumes metrics will use centroid (which is in world frame) and raster_from_world for transforms.\n obs_skimp = self.get_observation_skimp()\n obs_skimp[\"agents\"][\"image\"] = obs[\"image\"]\n obs_skimp[\"agents\"][\"raster_from_world\"] = obs[\"raster_from_world\"]\n self._add_per_step_metrics(obs_skimp[\"agents\"])\n\n # log actions\n if self._log_data:\n log_agents_info = action_info.copy()\n if action_samples is not None:\n # need to truncate samples as well\n # assuming action_samples is 
given as (B,N,T,D)\n # swaps to (B,T,N,D) for logging\n log_agents_info[\"action_samples\"] = TensorUtils.map_ndarray(action_samples, lambda x: np.swapaxes(x[:, :, action_index:], 1, 2))\n \n action_to_log = RolloutAction(\n agents=Action.from_dict(TensorUtils.map_ndarray(action, lambda x: x[:, action_index:])),\n agents_info=log_agents_info,\n )\n # this function assumes all actions to log have time dimension at index 1\n self.logger.log_step(obs_skimp, action_to_log)\n\n # step the scene\n idx = 0\n for scene in self._current_scenes:\n scene_action = dict()\n for agent in scene.agents:\n curr_yaw = obs[\"curr_agent_state\"][idx, -1]\n curr_pos = obs[\"curr_agent_state\"][idx, :2]\n world_from_agent = np.array(\n [\n [np.cos(curr_yaw), np.sin(curr_yaw)],\n [-np.sin(curr_yaw), np.cos(curr_yaw)],\n ]\n )\n next_state = np.ones(3, dtype=obs[\"agent_fut\"].dtype) * np.nan\n if not np.any(np.isnan(action[\"positions\"][idx, action_index])): # ground truth action may be NaN\n next_state[:2] = action[\"positions\"][idx, action_index] @ world_from_agent + curr_pos\n next_state[2] = curr_yaw + action[\"yaws\"][idx, action_index, 0]\n else:\n pass\n scene_action[agent.name] = next_state\n idx += 1\n scene.step(scene_action, return_obs=False)\n\n self._cached_observation = None\n\n if self._frame_index + num_steps_to_take >= self.horizon:\n self._done = True\n else:\n self._frame_index += num_steps_to_take\n\n def step(self, actions: RolloutAction, num_steps_to_take: int = 1, render=False):\n \"\"\"\n Step the simulation with control inputs\n\n Args:\n actions (RolloutAction): action for controlling ego and/or agents\n num_steps_to_take (int): how many env steps to take. Must be less or equal to length of the input actions\n \"\"\"\n actions = actions.to_numpy()\n self._step(step_actions=actions, num_steps_to_take=num_steps_to_take)\n return []" }, { "identifier": "translate_pass_trajdata_cfg", "path": "tbsim/utils/config_utils.py", "snippet": "def translate_pass_trajdata_cfg(cfg: ExperimentConfig):\n \"\"\"\n Translate a unified passthrough config to trajdata.\n \"\"\"\n rcfg = Dict()\n rcfg.step_time = cfg.algo.step_time\n rcfg.trajdata_cache_location = cfg.train.trajdata_cache_location\n rcfg.trajdata_source_train = cfg.train.trajdata_source_train\n rcfg.trajdata_source_valid = cfg.train.trajdata_source_valid\n rcfg.trajdata_data_dirs = cfg.train.trajdata_data_dirs\n rcfg.trajdata_rebuild_cache = cfg.train.trajdata_rebuild_cache\n\n rcfg.history_num_frames = cfg.algo.history_num_frames\n rcfg.future_num_frames = cfg.algo.future_num_frames\n\n rcfg.trajdata_centric = cfg.env.data_generation_params.trajdata_centric\n rcfg.trajdata_only_types = cfg.env.data_generation_params.trajdata_only_types\n rcfg.trajdata_predict_types = cfg.env.data_generation_params.trajdata_predict_types\n rcfg.trajdata_incl_map = cfg.env.data_generation_params.trajdata_incl_map\n rcfg.max_agents_distance = cfg.env.data_generation_params.trajdata_max_agents_distance\n rcfg.trajdata_standardize_data = cfg.env.data_generation_params.trajdata_standardize_data\n rcfg.trajdata_scene_desc_contains = cfg.env.data_generation_params.trajdata_scene_desc_contains\n\n rcfg.pixel_size = cfg.env.rasterizer.pixel_size\n rcfg.raster_size = int(cfg.env.rasterizer.raster_size)\n rcfg.raster_center = cfg.env.rasterizer.ego_center\n rcfg.num_sem_layers = cfg.env.rasterizer.num_sem_layers\n rcfg.drivable_layers = cfg.env.rasterizer.drivable_layers\n rcfg.no_map_fill_value = cfg.env.rasterizer.no_map_fill_value\n rcfg.raster_include_hist = 
cfg.env.rasterizer.include_hist\n\n rcfg.lock()\n return rcfg" }, { "identifier": "TRAJDATA_AGENT_TYPE_MAP", "path": "tbsim/utils/trajdata_utils.py", "snippet": "TRAJDATA_AGENT_TYPE_MAP = {\n 'unknown' : AgentType.UNKNOWN, \n 'vehicle' : AgentType.VEHICLE,\n 'pedestrian' : AgentType.PEDESTRIAN,\n 'bicycle' : AgentType.BICYCLE,\n 'motorcycle' : AgentType.MOTORCYCLE\n}" } ]
from collections import defaultdict
from trajdata import UnifiedDataset
from tbsim.configs.eval_config import EvaluationConfig
from tbsim.configs.base import ExperimentConfig
from tbsim.envs.env_trajdata import EnvUnifiedSimulation
from tbsim.utils.config_utils import translate_pass_trajdata_cfg
from tbsim.utils.trajdata_utils import TRAJDATA_AGENT_TYPE_MAP
import tbsim.envs.env_metrics as EnvMetrics
13174
class EnvironmentBuilder(object):
    """Builds a simulation environment for evaluation."""
    def __init__(self, eval_config: EvaluationConfig, exp_config: ExperimentConfig, device):
        self.eval_cfg = eval_config
        self.exp_cfg = exp_config
        self.device = device

    def _get_analytical_metrics(self):
        metrics = dict(
            all_off_road_rate=EnvMetrics.OffRoadRate(),
            all_disk_off_road_rate=EnvMetrics.DiskOffRoadRate(),
            all_sem_layer_rate=EnvMetrics.SemLayerRate(),
            all_collision_rate=EnvMetrics.CollisionRate(),
            all_disk_collision_rate=EnvMetrics.DiskCollisionRate(),
            agents_collision_rate=EnvMetrics.CollisionRate(),
            all_failure=EnvMetrics.CriticalFailure(num_offroad_frames=2),
            all_comfort=EnvMetrics.Comfort(sim_dt=self.exp_cfg.algo.step_time, stat_dt=0.5),
        )
        return metrics

    def get_env(self):
        raise NotImplementedError


class EnvUnifiedBuilder(EnvironmentBuilder):
    def get_env(self):
        exp_cfg = self.exp_cfg.clone()
        exp_cfg.unlock()
        exp_cfg.env.simulation.num_simulation_steps = self.eval_cfg.num_simulation_steps
        exp_cfg.env.simulation.start_frame_index = exp_cfg.algo.history_num_frames + 1
        exp_cfg.lock()

        # the config used at training time
        data_cfg = translate_pass_trajdata_cfg(exp_cfg)
        future_sec = data_cfg.future_num_frames * data_cfg.step_time
        history_sec = data_cfg.history_num_frames * data_cfg.step_time
        neighbor_distance = data_cfg.max_agents_distance
class EnvironmentBuilder(object):
    """Builds a simulation environment for evaluation."""
    def __init__(self, eval_config: EvaluationConfig, exp_config: ExperimentConfig, device):
        self.eval_cfg = eval_config
        self.exp_cfg = exp_config
        self.device = device

    def _get_analytical_metrics(self):
        metrics = dict(
            all_off_road_rate=EnvMetrics.OffRoadRate(),
            all_disk_off_road_rate=EnvMetrics.DiskOffRoadRate(),
            all_sem_layer_rate=EnvMetrics.SemLayerRate(),
            all_collision_rate=EnvMetrics.CollisionRate(),
            all_disk_collision_rate=EnvMetrics.DiskCollisionRate(),
            agents_collision_rate=EnvMetrics.CollisionRate(),
            all_failure=EnvMetrics.CriticalFailure(num_offroad_frames=2),
            all_comfort=EnvMetrics.Comfort(sim_dt=self.exp_cfg.algo.step_time, stat_dt=0.5),
        )
        return metrics

    def get_env(self):
        raise NotImplementedError


class EnvUnifiedBuilder(EnvironmentBuilder):
    def get_env(self):
        exp_cfg = self.exp_cfg.clone()
        exp_cfg.unlock()
        exp_cfg.env.simulation.num_simulation_steps = self.eval_cfg.num_simulation_steps
        exp_cfg.env.simulation.start_frame_index = exp_cfg.algo.history_num_frames + 1
        exp_cfg.lock()

        # the config used at training time
        data_cfg = translate_pass_trajdata_cfg(exp_cfg)
        future_sec = data_cfg.future_num_frames * data_cfg.step_time
        history_sec = data_cfg.history_num_frames * data_cfg.step_time
        neighbor_distance = data_cfg.max_agents_distance
agent_only_types = [TRAJDATA_AGENT_TYPE_MAP[cur_type] for cur_type in data_cfg.trajdata_only_types]
5
2023-10-31 18:43:07+00:00
16k
nv-tlabs/pacer
uhc/smpllib/np_smpl_humanoid_batch.py
[ { "identifier": "dict_to_torch", "path": "uhc/utils/torch_ext.py", "snippet": "def dict_to_torch(input_dict, dtype = None, device = None, add_dim = False):\n if not isinstance(input_dict, dict):\n return None\n out_dict = {}\n for key, value in input_dict.items():\n if isinstance(value, np.ndarray):\n value = torch.from_numpy(value)\n else:\n pass\n\n if torch.is_tensor(value):\n if dtype is not None:\n value = value.type(dtype)\n if device is not None:\n value = value.to(device)\n if add_dim:\n value = value[None, ]\n\n out_dict[key] = value\n\n return out_dict" }, { "identifier": "SMPLConverter", "path": "uhc/smpllib/smpl_mujoco.py", "snippet": "class SMPLConverter:\nclass SMPL_M_Renderer(object):\nclass SMPL_M_Viewer(object):\n def __init__(self, model, new_model, smpl_model=\"smpl\"):\n def qpos_smpl_2_new(self, qpos):\n def qvel_smpl_2_new(self, qpvel):\n def qpos_new_2_smpl(self, qpos):\n def qvel_new_2_smpl(self, qvel):\n def jpos_new_2_smpl(self, jpos):\n def get_new_qpos_lim(self):\n def get_new_qvel_lim(self):\n def get_new_body_lim(self):\n def get_new_diff_weight(self):\n def get_new_jkp(self):\n def get_new_jkd(self):\n def get_new_a_scale(self):\n def get_new_torque_limit(self):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_smpl(\n self,\n body_pose,\n tran=None,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n ):\n def render_qpose_and_write(\n self,\n qpos,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def render_qpose(\n self,\n qpose,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def show_pose(self, size=(960, 480), loop=False):\n def set_smpl_pose(self, pose, tran=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, tran=None, offset_z=0):\n def set_qpose(self, qpose):\n def show_pose_thread(self, return_img=False):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_qpose(self, qpose, follow=False):\n def show_pose(self, return_img=False, size=(1920, 1080), loop=False):\n def show_pose_in_thread(self, return_img=False, size=(1920, 1080)):\n def show_pose_thread(self, return_img=False):\n def set_smpl_pose(self, pose, trans=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, offset_z=0):\n def set_qpose(self, qpose):\ndef smplh_to_smpl(pose):\ndef smpl_to_smplh(pose):\ndef smpl_to_qpose(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_multi(\n pose,\n offset,\n mujoco_body_order,\n num_people=1,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_torch(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef qpos_to_smpl(qpos, mj_model, smpl_model=\"smpl\"):\ndef qpos_to_smpl_torch(qpos, mj_model, smpl_model=\"smpl\"):\ndef smpl_6d_to_qpose(full_pose, model, normalize=False):\ndef normalize_smpl_pose(pose_aa, trans=None, random_root=False):" }, { "identifier": "SMPL_EE_NAMES", "path": "uhc/smpllib/smpl_parser.py", "snippet": "SMPL_EE_NAMES = 
[\"L_Ankle\", \"R_Ankle\", \"L_Wrist\", \"R_Wrist\", \"Head\"]" }, { "identifier": "get_expert", "path": "uhc/utils/tools.py", "snippet": "def get_expert(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[:76] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com().copy()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert" }, { "identifier": "get_expert_master", "path": "uhc/utils/tools.py", "snippet": "def get_expert_master(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert_qpos = env.converter.qpos_smpl_2_new(expert_qpos)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[: env.qpos_lim] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = 
env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. 
Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n 
zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = 
smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n 
right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" } ]
import torch
import glob
import os
import sys
import pdb
import os.path as osp
import joblib
import pytorch3d.transforms as tR
import autograd.numpy as np
import time
import ipdb
from uhc.utils.torch_ext import dict_to_torch
from uhc.utils.torch_utils import *
from uhc.utils.transform_utils import *
from scipy.spatial.transform import Rotation as sRot
from uhc.smpllib.smpl_mujoco import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES
from uhc.smpllib.smpl_parser import SMPL_EE_NAMES
from uhc.utils.tools import get_expert, get_expert_master
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from autograd import elementwise_grad as egrad
from uhc.smpllib.smpl_robot import Robot
from uhc.smpllib.torch_smpl_humanoid import Humanoid
from uhc.utils.config_utils.copycat_config import Config
from uhc.data_loaders.dataset_amass_single import DatasetAMASSSingle
from uhc.utils.torch_ext import dict_to_torch
from uhc.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl
11659
pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform): ee_name = SMPL_EE_NAMES ee_pos = [] root_pos = body_xpos[:, 0, :] for name in ee_name: bone_id = self.model._body_name2id[name] - 1 bone_vec = body_xpos[:, bone_id] if transform is not None: bone_vec = bone_vec - root_pos bone_vec = transform_vec_batch(bone_vec, root_q, transform) ee_pos.append(bone_vec) return torch.swapaxes(torch.stack(ee_pos, dim=0), 0, 1) def forward_kinematics_batch(self, rotations, root_rotations, root_positions): """ Perform forward kinematics using the given trajectory and local rotations. Arguments (where B = batch size, J = number of joints): -- rotations: (B, J, 4) tensor of unit quaternions describing the local rotations of each joint. -- root_positions: (B, 3) tensor describing the root joint positions. Output: joint positions (B, J, 3) """ B, seq_len = rotations.shape[0:2] J = self._offsets.shape[1] positions_world = [] rotations_world = [] expanded_offsets = np.repeat(np.repeat(self._offsets, B, axis=0)[:, None, :], seq_len, axis=1) for i in range(J): if self._parents[i] == -1: positions_world.append(root_positions) rotations_world.append(root_rotations) else: jpos = ( np.matmul(rotations_world[self._parents[i]][:, :, 0], expanded_offsets[:, :, i, :, None]).squeeze(-1) + positions_world[self._parents[i]]) rot_mat = np.matmul(rotations_world[self._parents[i]], rotations[:, :, (i - 1):i, :]) positions_world.append(jpos) rotations_world.append(rot_mat) positions_world = np.stack(positions_world, axis=2) rotations_world = np.concatenate(rotations_world, axis=2) return positions_world, rotations_world if __name__ == "__main__": torch.manual_seed(0) cfg = Config( cfg_id="copycat_44", create_dirs=False, ) smpl_robot = Robot( cfg.robot_cfg, data_dir=osp.join(cfg.base_dir, "data/smpl"), masterfoot=False, ) dataset = DatasetAMASSSingle(cfg.data_specs, "test") humanoid_batch = Humanoid_Batch() data_test = dataset.sample_seq()
# import numpy as np sys.path.append(os.getcwd()) def smpl_op_to_op(pred_joints2d): new_2d = np.concatenate([pred_joints2d[..., [1, 4], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 1:7, :], \ pred_joints2d[..., [7, 8, 11], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 9:11, :], \ pred_joints2d[..., 12:, :]], \ axis = -2) return new_2d def normalize_screen_coordinates(X, w=1920, h=1080): assert X.shape[-1] == 2 # Normalize so that [0, w] is mapped to # [-1, 1], while preserving the aspect ratio return X / w * 2 - np.array([1, h / w]) def rodrigues(r): """ Rodrigues' rotation formula that turns axis-angle vector into rotation matrix in a batch-ed manner. Parameter: ---------- r: Axis-angle rotation vector of shape [batch_size, 1, 3]. Return: ------- Rotation matrix of shape [batch_size, 3, 3]. """ theta = np.linalg.norm(r, axis=(1, 2))[:, None, None] # avoid zero divide theta = np.maximum(theta, np.finfo(r.dtype).eps) r_hat = r / theta cos = np.cos(theta) z_stick = np.zeros(theta.shape[0]) m = np.stack([ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick ], axis=1).reshape([-1, 3, 3]) i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3]) A = np.transpose(r_hat, axes=[0, 2, 1]) B = r_hat dot = np.matmul(A, B) R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m return R def rodrigues_vec_to_rotation_mat(rot): theta = np.linalg.norm(rot, axis=0) if theta < sys.float_info.epsilon: rotation_mat = np.eye(3, dtype=float) else: rot = rot / theta I = np.eye(3, dtype=float) r_rT = np.array([[rot[0] * rot[0], rot[0] * rot[1], rot[0] * rot[2]], [rot[1] * rot[0], rot[1] * rot[1], rot[1] * rot[2]], [rot[2] * rot[0], rot[2] * rot[1], rot[2] * rot[2]]]) r_cross = np.array([[0, -rot[2], rot[1]], [rot[2], 0, -rot[0]], [-rot[1], rot[0], 0]]) rotation_mat = np.cos(theta) * I + ( 1 - np.cos(theta)) * r_rT + np.sin(theta) * r_cross return rotation_mat class Humanoid_Batch: def __init__(self, smpl_model="smpl", data_dir="data/smpl"): self.smpl_model = smpl_model if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.model_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] self._parents = [ -1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 11, 19, 20, 21, 22 ] self.smpl_index = [ SMPL_BONE_ORDER_NAMES.index(i) for i in self.model_names ] def update_model(self, betas, 
gender): betas, gender = betas.cpu().float(), gender.cpu().long() B, _ = betas.shape betas_f = betas[gender == 2] if len(betas_f) > 0: _, _, _, _, joint_offsets_f, _, _, _, _, _, _, = self.smpl_parser_f.get_mesh_offsets_batch( betas=betas_f[:, :10]) betas_n = betas[gender == 0] if len(betas_n) > 0: _, _, _, _, joint_offsets_n, _, _, _, _, _, _, = self.smpl_parser_n.get_mesh_offsets_batch( betas=betas_n[:, :10]) betas_m = betas[gender == 1] if len(betas_m) > 0: _, _, _, _, joint_offsets_m, _, _, _, _, _, _, = self.smpl_parser_m.get_mesh_offsets_batch( betas=betas_m[:, :10]) joint_offsets_all = dict() for n in SMPL_BONE_ORDER_NAMES: joint_offsets_all[n] = torch.zeros([B, 3]).float() if len(betas_f) > 0: joint_offsets_all[n][gender == 2] = joint_offsets_f[n] if len(betas_n) > 0: joint_offsets_all[n][gender == 0] = joint_offsets_n[n] if len(betas_m) > 0: joint_offsets_all[n][gender == 1] = joint_offsets_m[n] off_sets = [] for n in self.model_names: off_sets.append(joint_offsets_all[n]) # self._offsets = torch.from_numpy(np.stack(off_sets, axis=1)) self._offsets = np.round(np.stack(off_sets, axis=1), decimals=5) self.trans2joint = -self._offsets[:, 0:1] self.trans2joint[:, :, 2] = 0 # self._offsets = joblib.load("curr_offset.pkl")[None, ] def update_projection(self, cam_params, smpl2op_map, MUJOCO_2_SMPL): self.full_R = cam_params['full_R'] self.full_t = cam_params['full_t'] self.K = cam_params['K'] self.img_w = cam_params['img_w'] self.img_h = cam_params['img_h'] self.openpose_subindex = smpl2op_map < 22 self.smpl2op_map = smpl2op_map self.smpl2op_partial = self.smpl2op_map[self.openpose_subindex] self.MUJOCO_2_SMPL = MUJOCO_2_SMPL def update_tgt_joints(self, tgt_joints, inliers): self.gt_2d_joints = tgt_joints self.inliers = inliers.astype(bool) num_joints = self.gt_2d_joints.shape[-2] self.gt_2d_joints_norm = normalize_screen_coordinates(self.gt_2d_joints, self.img_w, self.img_h) self.num_frames = self.gt_2d_joints.shape[0] self.camera_rays = np.concatenate([self.gt_2d_joints, np.ones([self.num_frames, num_joints, 1])], axis=2).dot(np.linalg.inv(self.K).T) self.camera_rays /= np.linalg.norm(self.camera_rays, axis=2)[..., None] lam = 0.3 self.weighting = np.exp(lam * -np.arange(self.num_frames)) / np.sum( np.exp(lam * -np.arange(self.num_frames))) self.weighting = np.tile(self.weighting[:, None, None], [1, num_joints, 2]) # self.weighting = np.ones(self.num_frames) / self.num_frames def proj2d(self, wbpos, return_cam_3d=False): # wbpos in mujoco pred_joints3d = wbpos.squeeze()[self.MUJOCO_2_SMPL][ self.smpl2op_partial][None, ] pred_joints3d = pred_joints3d @ self.full_R.T + self.full_t pred_joints2d = pred_joints3d @ (self.K.T) z = pred_joints2d[:, :, 2:] pred_joints2d = pred_joints2d[:, :, :2] / z pred_joints2d = smpl_op_to_op(pred_joints2d) if return_cam_3d: return pred_joints2d, pred_joints3d else: return pred_joints2d def proj_2d_line_loss(self, input_vec): wbpos = self.fk_batch_grad(input_vec) _, pred_joints3d = self.proj2d(wbpos, return_cam_3d=True) dist = np.cross(pred_joints3d[0], pred_joints3d[0] - self.camera_rays)**2 return dist.mean() def proj_2d_loss(self, input_vec, ord=2, normalize = True): wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs( gt_2d_joints[self.inliers] - 
pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_body_loss(self, input_vec, ord=2, normalize = False): # Has to use the current translation (to roughly put at the same position, and then zero out the translation) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) gt2d_center = self.gt_2d_joints[..., 7:8, :].copy() pred_joints2d += (gt2d_center - pred_joints2d[..., 7:8, :]) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs(gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_root_loss(self, root_pos_rot): input_vec = np.concatenate( [root_pos_rot.reshape([1, 1, 6]), np.zeros([1, 1, 69])], axis=2) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) return np.abs(self.gt_2d_joints[7:8] - pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform): ee_name = SMPL_EE_NAMES ee_pos = [] root_pos = body_xpos[:, 0, :] for name in ee_name: bone_id = self.model._body_name2id[name] - 1 bone_vec = body_xpos[:, bone_id] if transform is not None: bone_vec = bone_vec - root_pos bone_vec = transform_vec_batch(bone_vec, root_q, transform) ee_pos.append(bone_vec) return torch.swapaxes(torch.stack(ee_pos, dim=0), 0, 1) def forward_kinematics_batch(self, rotations, root_rotations, root_positions): """ Perform forward kinematics using the given trajectory and local rotations. Arguments (where B = batch size, J = number of joints): -- rotations: (B, J, 4) tensor of unit quaternions describing the local rotations of each joint. -- root_positions: (B, 3) tensor describing the root joint positions. 
Output: joint positions (B, J, 3) """ B, seq_len = rotations.shape[0:2] J = self._offsets.shape[1] positions_world = [] rotations_world = [] expanded_offsets = np.repeat(np.repeat(self._offsets, B, axis=0)[:, None, :], seq_len, axis=1) for i in range(J): if self._parents[i] == -1: positions_world.append(root_positions) rotations_world.append(root_rotations) else: jpos = ( np.matmul(rotations_world[self._parents[i]][:, :, 0], expanded_offsets[:, :, i, :, None]).squeeze(-1) + positions_world[self._parents[i]]) rot_mat = np.matmul(rotations_world[self._parents[i]], rotations[:, :, (i - 1):i, :]) positions_world.append(jpos) rotations_world.append(rot_mat) positions_world = np.stack(positions_world, axis=2) rotations_world = np.concatenate(rotations_world, axis=2) return positions_world, rotations_world if __name__ == "__main__": torch.manual_seed(0) cfg = Config( cfg_id="copycat_44", create_dirs=False, ) smpl_robot = Robot( cfg.robot_cfg, data_dir=osp.join(cfg.base_dir, "data/smpl"), masterfoot=False, ) dataset = DatasetAMASSSingle(cfg.data_specs, "test") humanoid_batch = Humanoid_Batch() data_test = dataset.sample_seq()
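The rodrigues helpers in the code above implement the axis-angle to rotation-matrix conversion by hand, in a batched and a single-vector variant. As an illustrative cross-check (not part of the original file), the same formula can be validated against scipy's rotation-vector conversion, which uses the identical angle-times-axis convention; the sketch below assumes only numpy and scipy are available.

import numpy as np
from scipy.spatial.transform import Rotation as R

def rodrigues_batch(rotvec):
    """Axis-angle vectors (B, 3) -> rotation matrices (B, 3, 3) via Rodrigues' formula."""
    theta = np.linalg.norm(rotvec, axis=1, keepdims=True)
    theta = np.maximum(theta, np.finfo(rotvec.dtype).eps)   # avoid division by zero
    k = rotvec / theta                                       # unit rotation axes
    K = np.zeros((rotvec.shape[0], 3, 3))                    # skew-symmetric cross-product matrices
    K[:, 0, 1], K[:, 0, 2] = -k[:, 2], k[:, 1]
    K[:, 1, 0], K[:, 1, 2] = k[:, 2], -k[:, 0]
    K[:, 2, 0], K[:, 2, 1] = -k[:, 1], k[:, 0]
    cos = np.cos(theta)[:, :, None]
    sin = np.sin(theta)[:, :, None]
    outer = k[:, :, None] * k[:, None, :]
    return cos * np.eye(3) + (1.0 - cos) * outer + sin * K

rotvecs = np.random.default_rng(0).normal(size=(8, 3))
assert np.allclose(rodrigues_batch(rotvecs), R.from_rotvec(rotvecs).as_matrix(), atol=1e-8)

The epsilon clamp mirrors the zero-angle guard in the batched version above; an all-zero vector maps to (numerically) the identity matrix in both implementations.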
data_test = dict_to_torch(data_test)
0
2023-10-31 20:47:12+00:00
16k
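The proj2d method in the record above applies a row-vector pinhole model: joint positions are moved into the camera frame with X @ full_R.T + full_t, multiplied by K.T, and then divided by depth. Below is a minimal standalone sketch of that projection step; the rotation, translation, and intrinsics values are made-up placeholders rather than values from the original data.

import numpy as np

def project_points(points_w, R_cam, t_cam, K):
    """Project world-space points (N, 3) to pixel coordinates (N, 2), row-vector convention."""
    points_c = points_w @ R_cam.T + t_cam      # world frame -> camera frame
    uvw = points_c @ K.T                       # camera frame -> homogeneous pixel coordinates
    return uvw[:, :2] / uvw[:, 2:]             # perspective division by depth

# Placeholder camera: identity rotation, points pushed 3 m in front of the camera,
# focal length 1000 px, principal point at (960, 540) for a 1920x1080 image.
R_cam = np.eye(3)
t_cam = np.array([0.0, 0.0, 3.0])
K = np.array([[1000.0, 0.0, 960.0],
              [0.0, 1000.0, 540.0],
              [0.0, 0.0, 1.0]])
points_w = np.array([[0.0, 0.0, 0.0], [0.1, -0.2, 0.5]])
print(project_points(points_w, R_cam, t_cam, K))   # the world origin lands on the principal point

The 1920x1080 placeholder matches the default signature of normalize_screen_coordinates above, which rescales exactly these pixel coordinates into [-1, 1] while preserving the aspect ratio.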
Improbable-AI/dexenv
dexenv/envs/dclaw_base.py
[ { "identifier": "VecTask", "path": "dexenv/envs/base/vec_task.py", "snippet": "class VecTask(Env):\n\n def __init__(self, config, sim_device, rl_device, graphics_device_id, headless):\n \"\"\"Initialise the `VecTask`.\n Args:\n config: config dictionary for the environment.\n sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'\n graphics_device_id: the device ID to render with.\n headless: Set to False to disable viewer rendering.\n \"\"\"\n super().__init__(config, sim_device, rl_device, graphics_device_id, headless)\n\n self.sim_params = self.__parse_sim_params(self.cfg[\"physics_engine\"], self.cfg[\"sim\"])\n if self.cfg[\"physics_engine\"] == \"physx\":\n self.physics_engine = gymapi.SIM_PHYSX\n elif self.cfg[\"physics_engine\"] == \"flex\":\n self.physics_engine = gymapi.SIM_FLEX\n else:\n msg = f\"Invalid physics engine backend: {self.cfg['physics_engine']}\"\n raise ValueError(msg)\n\n # optimization flags for pytorch JIT\n torch._C._jit_set_profiling_mode(False)\n torch._C._jit_set_profiling_executor(False)\n\n self.gym = gymapi.acquire_gym()\n\n self.first_randomization = True\n self.original_props = {}\n self.dr_randomizations = {}\n self.actor_params_generator = None\n self.extern_actor_params = {}\n self.last_step = -1\n self.last_rand_step = -1\n for env_id in range(self.num_envs):\n self.extern_actor_params[env_id] = None\n\n # create envs, sim and viewer\n self.sim_initialized = False\n self.create_sim()\n self.gym.prepare_sim(self.sim)\n self.sim_initialized = True\n\n self.set_viewer()\n self.allocate_buffers()\n\n self.obs_dict = {}\n\n def set_viewer(self):\n \"\"\"Create the viewer.\"\"\"\n\n # todo: read from config\n self.enable_viewer_sync = True\n self.viewer = None\n\n # if running with a viewer, set up keyboard shortcuts and camera\n if self.headless == False:\n # subscribe to keyboard shortcuts\n self.viewer = self.gym.create_viewer(\n self.sim, gymapi.CameraProperties())\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_ESCAPE, \"QUIT\")\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_V, \"toggle_viewer_sync\")\n\n # set the camera position based on up axis\n sim_params = self.gym.get_sim_params(self.sim)\n if sim_params.up_axis == gymapi.UP_AXIS_Z:\n cam_pos = gymapi.Vec3(20.0, 25.0, 3.0)\n cam_target = gymapi.Vec3(10.0, 15.0, 0.0)\n else:\n cam_pos = gymapi.Vec3(20.0, 3.0, 25.0)\n cam_target = gymapi.Vec3(10.0, 0.0, 15.0)\n\n self.gym.viewer_camera_look_at(\n self.viewer, None, cam_pos, cam_target)\n\n def allocate_buffers(self):\n \"\"\"Allocate the observation, states, etc. 
buffers.\n These are what is used to set observations and states in the environment classes which\n inherit from this one, and are read in `step` and other related functions.\n \"\"\"\n\n # allocate buffers\n self.allocate_ob_buffers()\n self.rew_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.float)\n self.done_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.reset_buf = torch.ones(\n self.num_envs, device=self.device, dtype=torch.long)\n self.timeout_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.progress_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.randomize_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.extras = {}\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n self.states_buf = torch.zeros(\n (self.num_envs, self.num_states), device=self.device, dtype=torch.float)\n\n #\n def set_sim_params_up_axis(self, sim_params: gymapi.SimParams, axis: str) -> int:\n \"\"\"Set gravity based on up axis and return axis index.\n Args:\n sim_params: sim params to modify the axis for.\n axis: axis to set sim params for.\n Returns:\n axis index for up axis.\n \"\"\"\n if axis == 'z':\n sim_params.up_axis = gymapi.UP_AXIS_Z\n sim_params.gravity.x = 0\n sim_params.gravity.y = 0\n sim_params.gravity.z = -9.81\n return 2\n return 1\n\n def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):\n \"\"\"Create an Isaac Gym sim object.\n Args:\n compute_device: ID of compute device to use.\n graphics_device: ID of graphics device to use.\n physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)\n sim_params: sim params to use.\n Returns:\n the Isaac Gym sim object.\n \"\"\"\n sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params)\n if sim is None:\n print(\"*** Failed to create sim\")\n quit()\n\n return sim\n\n def get_state(self):\n \"\"\"Returns the state buffer of the environment (the priviledged observations for asymmetric training).\"\"\"\n return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n @abc.abstractmethod\n def pre_physics_step(self, actions: torch.Tensor):\n \"\"\"Apply the actions to the environment (eg by setting torques, position targets).\n Args:\n actions: the actions to apply\n \"\"\"\n\n @abc.abstractmethod\n def post_physics_step(self):\n \"\"\"Compute reward and observations, reset any environments that require it.\"\"\"\n\n def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:\n \"\"\"Step the physics of the environment.\n Args:\n actions: actions to apply\n Returns:\n Observations, rewards, resets, info\n Observations are dict of observations (currently only one member called 'obs')\n \"\"\"\n self.raw_actions_from_policy = actions.clone()\n # randomize actions\n if self.dr_randomizations.get('actions', None):\n actions = self.dr_randomizations['actions']['noise_lambda'](actions)\n action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)\n # apply actions\n self.pre_physics_step(action_tensor)\n\n # # step physics and render each frame\n for i in range(self.control_freq_inv):\n self.render()\n self.gym.simulate(self.sim)\n # to fix!\n if self.device == 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # fill time out 
buffer\n self.timeout_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.timeout_buf),\n torch.zeros_like(self.timeout_buf))\n\n # compute observations, rewards, resets, ...\n self.post_physics_step()\n\n self.extras[\"time_outs\"] = self.timeout_buf.to(self.rl_device)\n return self.update_obs(), self.rew_buf.to(self.rl_device), self.done_buf.to(self.rl_device), self.extras\n\n def update_obs(self):\n # randomize observations\n if self.dr_randomizations.get('observations', None):\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n # asymmetric actor-critic\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def zero_actions(self) -> torch.Tensor:\n \"\"\"Returns a buffer with zero actions.\n Returns:\n A buffer of zero torch actions\n \"\"\"\n actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.device)\n\n return actions\n\n ## original code from Nvidia\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n\n # step the simulator\n self.step(zero_actions)\n\n return self.update_obs()\n\n def render(self):\n \"\"\"Draw the frame to the viewer, and check for keyboard events.\"\"\"\n if self.viewer:\n # check for window closed\n if self.gym.query_viewer_has_closed(self.viewer):\n sys.exit()\n\n # check for keyboard events\n for evt in self.gym.query_viewer_action_events(self.viewer):\n if evt.action == \"QUIT\" and evt.value > 0:\n sys.exit()\n elif evt.action == \"toggle_viewer_sync\" and evt.value > 0:\n self.enable_viewer_sync = not self.enable_viewer_sync\n\n # fetch results\n if self.device != 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # step graphics\n if self.enable_viewer_sync:\n self.gym.step_graphics(self.sim)\n self.gym.draw_viewer(self.viewer, self.sim, True)\n\n # Wait for dt to elapse in real time.\n # This synchronizes the physics simulation with the rendering rate.\n self.gym.sync_frame_time(self.sim)\n\n else:\n self.gym.poll_viewer_events(self.viewer)\n\n def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams:\n \"\"\"Parse the config dictionary for physics stepping settings.\n Args:\n physics_engine: which physics engine to use. 
\"physx\" or \"flex\"\n config_sim: dict of sim configuration parameters\n Returns\n IsaacGym SimParams object with updated settings.\n \"\"\"\n sim_params = gymapi.SimParams()\n\n # check correct up-axis\n if config_sim[\"up_axis\"] not in [\"z\", \"y\"]:\n msg = f\"Invalid physics up-axis: {config_sim['up_axis']}\"\n print(msg)\n raise ValueError(msg)\n\n # assign general sim parameters\n sim_params.dt = config_sim[\"dt\"]\n sim_params.num_client_threads = config_sim.get(\"num_client_threads\", 0)\n sim_params.use_gpu_pipeline = config_sim[\"use_gpu_pipeline\"]\n sim_params.substeps = config_sim.get(\"substeps\", 2)\n\n # assign up-axis\n if config_sim[\"up_axis\"] == \"z\":\n sim_params.up_axis = gymapi.UP_AXIS_Z\n else:\n sim_params.up_axis = gymapi.UP_AXIS_Y\n\n # assign gravity\n sim_params.gravity = gymapi.Vec3(*config_sim[\"gravity\"])\n\n # configure physics parameters\n if physics_engine == \"physx\":\n # set the parameters\n if \"physx\" in config_sim:\n for opt in config_sim[\"physx\"].keys():\n if opt == \"contact_collection\":\n setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim[\"physx\"][opt]))\n else:\n setattr(sim_params.physx, opt, config_sim[\"physx\"][opt])\n else:\n # set the parameters\n if \"flex\" in config_sim:\n for opt in config_sim[\"flex\"].keys():\n setattr(sim_params.flex, opt, config_sim[\"flex\"][opt])\n\n # return the configured params\n return sim_params\n\n \"\"\"\n Domain Randomization methods\n \"\"\"\n\n def get_actor_params_info(self, dr_params: Dict[str, Any], env):\n \"\"\"Generate a flat array of actor params, their names and ranges.\n Returns:\n The array\n \"\"\"\n\n if \"actor_params\" not in dr_params:\n return None\n params = []\n names = []\n lows = []\n highs = []\n param_getters_map = get_property_getter_map(self.gym)\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n handle = self.gym.find_actor_handle(env, actor)\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name in ['color', 'scale']:\n continue # this is set randomly\n props = param_getters_map[prop_name](env, handle)\n if not isinstance(props, list):\n props = [props]\n for prop_idx, prop in enumerate(props):\n for attr, attr_randomization_params in prop_attrs.items():\n name = prop_name + '_' + str(prop_idx) + '_' + attr\n lo_hi = attr_randomization_params['range']\n distr = attr_randomization_params['distribution']\n if 'uniform' not in distr:\n lo_hi = (-1.0 * float('Inf'), float('Inf'))\n if isinstance(prop, np.ndarray):\n for attr_idx in range(prop[attr].shape[0]):\n params.append(prop[attr][attr_idx])\n names.append(name + '_' + str(attr_idx))\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n else:\n params.append(getattr(prop, attr))\n names.append(name)\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n return params, names, lows, highs\n\n def apply_randomizations(self, dr_params):\n rand_freq = dr_params.get(\"frequency\", 1)\n self.last_step = self.gym.get_frame_count(self.sim)\n if self.first_randomization:\n do_nonenv_randomize = True\n env_ids = list(range(self.num_envs))\n else:\n do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq\n rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))\n rand_envs = torch.logical_and(rand_envs, self.reset_buf)\n env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()\n self.randomize_buf[rand_envs] = 0\n\n if do_nonenv_randomize:\n self.last_rand_step = 
self.last_step\n\n param_setters_map = get_property_setter_map(self.gym)\n param_setter_defaults_map = get_default_setter_args(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n\n # On first iteration, check the number of buckets\n if self.first_randomization:\n check_buckets(self.gym, self.envs, dr_params)\n\n for nonphysical_param in [\"observations\", \"actions\"]:\n if nonphysical_param in dr_params and do_nonenv_randomize:\n dist = dr_params[nonphysical_param][\"distribution\"]\n op_type = dr_params[nonphysical_param][\"operation\"]\n sched_type = dr_params[nonphysical_param][\"schedule\"] if \"schedule\" in dr_params[nonphysical_param] else None\n sched_step = dr_params[nonphysical_param][\"schedule_steps\"] if \"schedule\" in dr_params[nonphysical_param] else None\n op = operator.add if op_type == 'additive' else operator.mul\n\n if sched_type == 'linear':\n sched_scaling = 1.0 / sched_step * \\\n min(self.last_step, sched_step)\n elif sched_type == 'constant':\n sched_scaling = 0 if self.last_step < sched_step else 1\n else:\n sched_scaling = 1\n\n if dist == 'gaussian':\n mu, var = dr_params[nonphysical_param][\"range\"]\n mu_corr, var_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n mu *= sched_scaling\n var *= sched_scaling\n mu_corr *= sched_scaling\n var_corr *= sched_scaling\n elif op_type == 'scaling':\n var = var * sched_scaling # scale up var over time\n mu = mu * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n var_corr = var_corr * sched_scaling # scale up var over time\n mu_corr = mu_corr * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * params['var_corr'] + params['mu_corr']\n return op(\n tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])\n\n self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr,\n 'noise_lambda': noise_lambda}\n\n elif dist == 'uniform':\n lo, hi = dr_params[nonphysical_param][\"range\"]\n lo_corr, hi_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n lo *= sched_scaling\n hi *= sched_scaling\n lo_corr *= sched_scaling\n hi_corr *= sched_scaling\n elif op_type == 'scaling':\n lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)\n lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']\n return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])\n\n self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr,\n 'noise_lambda': noise_lambda}\n\n if \"sim_params\" in dr_params and do_nonenv_randomize:\n prop_attrs = dr_params[\"sim_params\"]\n prop = self.gym.get_sim_params(self.sim)\n\n if self.first_randomization:\n self.original_props[\"sim_params\"] = {\n attr: getattr(prop, 
attr) for attr in dir(prop)}\n\n for attr, attr_randomization_params in prop_attrs.items():\n apply_random_samples(\n prop, self.original_props[\"sim_params\"], attr, attr_randomization_params, self.last_step)\n\n self.gym.set_sim_params(self.sim, prop)\n extern_offsets = {}\n if self.actor_params_generator is not None:\n for env_id in env_ids:\n self.extern_actor_params[env_id] = \\\n self.actor_params_generator.sample()\n extern_offsets[env_id] = 0\n\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n for env_id in env_ids:\n self.original_props.setdefault(env_id, dict())\n env = self.envs[env_id]\n handle = self.gym.find_actor_handle(env, actor)\n self.original_props[env_id].setdefault(handle, dict())\n extern_sample = self.extern_actor_params[env_id]\n\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name == 'color':\n num_bodies = self.gym.get_actor_rigid_body_count(\n env, handle)\n for n in range(num_bodies):\n self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,\n gymapi.Vec3(random.uniform(0, 1),\n random.uniform(0, 1),\n random.uniform(0, 1)))\n continue\n if prop_name == 'scale':\n setup_only = prop_attrs.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n attr_randomization_params = prop_attrs\n sample = generate_random_samples(attr_randomization_params, 1,\n self.last_step, None)\n og_scale = 1\n if attr_randomization_params['operation'] == 'scaling':\n new_scale = og_scale * sample\n elif attr_randomization_params['operation'] == 'additive':\n new_scale = og_scale + sample\n self.gym.set_actor_scale(env, handle, new_scale)\n continue\n\n prop = param_getters_map[prop_name](env, handle)\n set_random_properties = True\n if isinstance(prop, list):\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = [\n {attr: getattr(p, attr) for attr in dir(p)} for p in prop]\n for attr, attr_randomization_params in prop_attrs.items():\n same_for_all = attr_randomization_params.get('same_for_all', False)\n setup_only = attr_randomization_params.get('setup_only', False)\n attr_sample = None\n assert len(prop) == len(self.original_props[env_id][handle][prop_name])\n for p, og_p in zip(prop, self.original_props[env_id][handle][prop_name]):\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], p, attr)\n if same_for_all and attr_sample is not None:\n apply_prop_samples(p, og_p, attr, attr_randomization_params, attr_sample)\n else:\n attr_sample = apply_random_samples(\n p, og_p, attr, attr_randomization_params,\n self.last_step, smpl)\n else:\n set_random_properties = False\n else:\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = deepcopy(prop)\n for attr, attr_randomization_params in prop_attrs.items():\n setup_only = attr_randomization_params.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], prop, attr)\n apply_random_samples(\n prop, self.original_props[env_id][handle][prop_name], attr,\n attr_randomization_params, self.last_step, smpl)\n else:\n set_random_properties = False\n if set_random_properties:\n setter = param_setters_map[prop_name]\n default_args = 
param_setter_defaults_map[prop_name]\n setter(env, handle, prop, *default_args)\n\n if self.actor_params_generator is not None:\n for env_id in env_ids: # check that we used all dims in sample\n if extern_offsets[env_id] > 0:\n extern_sample = self.extern_actor_params[env_id]\n if extern_offsets[env_id] != extern_sample.shape[0]:\n print('env_id', env_id,\n 'extern_offset', extern_offsets[env_id],\n 'vs extern_sample.shape', extern_sample.shape)\n raise Exception(\"Invalid extern_sample size\")\n self.first_randomization = False\n return env_ids" }, { "identifier": "compute_dclaw_reward", "path": "dexenv/envs/rewards.py", "snippet": "@torch.no_grad()\ndef compute_dclaw_reward(reset_buf, reset_goal_buf, progress_buf,\n successes, max_episode_length: float,\n object_pos, object_rot, target_pos, target_rot,\n reward_cfg, actions,\n fingertip_pos=None, fingertip_vel=None,\n object_linvel=None, object_angvel=None, dof_vel=None,\n dof_torque=None, table_cf=None\n ):\n rot_reward_scale = reward_cfg.rotRewardScale\n rot_eps = reward_cfg.rotEps\n reach_goal_bonus = reward_cfg.reachGoalBonus\n fall_dist = reward_cfg.fallDistance\n fall_penalty = reward_cfg.fallPenalty\n success_tolerance = reward_cfg.successTolerance\n ftip_reward_scale = reward_cfg.ftipRewardScale\n penalize_tb_contact = reward_cfg.pen_tb_contact\n kwargs = dict(\n reset_buf=reset_buf,\n reset_goal_buf=reset_goal_buf,\n progress_buf=progress_buf,\n successes=successes,\n max_episode_length=max_episode_length,\n object_pos=object_pos,\n object_rot=object_rot,\n target_pos=target_pos,\n target_rot=target_rot,\n actions=actions,\n fingertip_pos=fingertip_pos,\n object_linvel=object_linvel,\n object_angvel=object_angvel,\n dof_vel=dof_vel,\n dof_torque=dof_torque,\n rot_reward_scale=rot_reward_scale,\n rot_eps=rot_eps,\n reach_goal_bonus=reach_goal_bonus,\n fall_dist=fall_dist,\n fall_penalty=fall_penalty,\n success_tolerance=success_tolerance,\n ftip_reward_scale=ftip_reward_scale,\n energy_scale=reward_cfg.energy_scale,\n dof_vel_thresh=reward_cfg.dof_vel_thresh,\n obj_lin_vel_thresh=reward_cfg.obj_lin_vel_thresh,\n obj_ang_vel_thresh=reward_cfg.obj_ang_vel_thresh,\n action_norm_thresh=reward_cfg.action_norm_thresh,\n penalize_tb_contact=penalize_tb_contact,\n table_cf=table_cf if table_cf is not None else torch.ones(1),\n tb_cf_scale=reward_cfg.tb_cf_scale,\n clip_energy_reward=reward_cfg.clip_energy_reward,\n energy_upper_bound=reward_cfg.energy_upper_bound,\n )\n out = compute_reward(**kwargs)\n return out" }, { "identifier": "get_module_path", "path": "dexenv/utils/common.py", "snippet": "def get_module_path(module):\n modu = importlib.util.find_spec(module)\n return Path(list(modu.submodule_search_locations)[0])" }, { "identifier": "pathlib_file", "path": "dexenv/utils/common.py", "snippet": "def pathlib_file(file_name):\n if isinstance(file_name, str):\n file_name = Path(file_name)\n elif not isinstance(file_name, Path):\n raise TypeError(f'Please check the type of the filename:{file_name}')\n return file_name" }, { "identifier": "dclaw_body_color_mapping", "path": "dexenv/utils/hand_color.py", "snippet": "FINGERTIP_COLORS = np.array([\n [111, 29, 27],\n [187, 148, 87],\n [67, 40, 24],\n [153, 88, 42],\n [255, 230, 167]\n]) / 255.0\nFINGERTIP_COLORS = FINGERTIP_COLORS.tolist()" }, { "identifier": "get_camera_params", "path": "dexenv/utils/isaac_utils.py", "snippet": "def get_camera_params(width=640, height=480, hov=75, cuda=True):\n camera_props = gymapi.CameraProperties()\n camera_props.horizontal_fov = hov\n 
camera_props.width = width\n camera_props.height = height\n camera_props.enable_tensors = cuda\n return camera_props" }, { "identifier": "random_quaternions", "path": "dexenv/utils/torch_utils.py", "snippet": "@torch.no_grad()\ndef random_quaternions(num, dtype=None, device=None, order='xyzw'):\n \"\"\"\n return quaternions in [w, x, y, z] or [x, y, z, w]\n \"\"\"\n if PYTORCH3D_AVAILABLE:\n quats = py3d_rot_cvt.random_quaternions(num, dtype=dtype, device=device)\n else:\n \"\"\"\n http://planning.cs.uiuc.edu/node198.html\n \"\"\"\n ran = torch.rand(num, 3, dtype=dtype, device=device)\n r1, r2, r3 = ran[:, 0], ran[:, 1], ran[:, 2]\n pi2 = 2 * np.pi\n r1_1 = torch.sqrt(1.0 - r1)\n r1_2 = torch.sqrt(r1)\n t1 = pi2 * r2\n t2 = pi2 * r3\n\n quats = torch.zeros(num, 4, dtype=dtype, device=device)\n quats[:, 0] = r1_1 * (torch.sin(t1))\n quats[:, 1] = r1_1 * (torch.cos(t1))\n quats[:, 2] = r1_2 * (torch.sin(t2))\n quats[:, 3] = r1_2 * (torch.cos(t2))\n\n assert order in ['xyzw', 'wxyz']\n if order == 'xyzw':\n quats = quat_wxyz_to_xyzw(quats)\n return quats" }, { "identifier": "torch_long", "path": "dexenv/utils/torch_utils.py", "snippet": "def torch_long(array, device='cpu'):\n if isinstance(array, torch.Tensor):\n return array.long().to(device)\n elif isinstance(array, np.ndarray):\n return torch.from_numpy(array).long().to(device)\n elif isinstance(array, list):\n return torch.LongTensor(array).to(device)\n elif isinstance(array, dict):\n new_dict = dict()\n for k, v in array.items():\n new_dict[k] = torch_long(v, device)\n return new_dict" } ]
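The numpy fallback inside random_quaternions above samples uniformly distributed rotations with the three-uniform-numbers construction cited in that snippet (planning.cs.uiuc.edu). A numpy-only sketch of the same sampling, plus a unit-norm check, is shown below; the component order is simply what the formula produces and is not asserted to match Isaac Gym's wxyz/xyzw conventions.

import numpy as np

def random_unit_quaternions(num, rng=None):
    """Sample `num` quaternions uniformly over SO(3) from three uniform random numbers."""
    rng = np.random.default_rng() if rng is None else rng
    u1, u2, u3 = rng.random((3, num))
    two_pi = 2.0 * np.pi
    return np.stack([
        np.sqrt(1.0 - u1) * np.sin(two_pi * u2),
        np.sqrt(1.0 - u1) * np.cos(two_pi * u2),
        np.sqrt(u1) * np.sin(two_pi * u3),
        np.sqrt(u1) * np.cos(two_pi * u3),
    ], axis=1)

quats = random_unit_quaternions(1000, np.random.default_rng(0))
assert np.allclose(np.linalg.norm(quats, axis=1), 1.0)   # every sample is a unit quaternion

Because sin^2 + cos^2 = 1 in both component pairs, the squared norm is (1 - u1) + u1 = 1 by construction, so normalization never depends on the random draws.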
import time
import torch
import dexenv
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgym.gymutil import get_property_getter_map
from isaacgym.gymutil import get_property_setter_map
from isaacgymenvs.utils.torch_jit_utils import *
from loguru import logger
from dexenv.envs.base.vec_task import VecTask
from dexenv.envs.rewards import compute_dclaw_reward
from dexenv.utils.common import get_module_path
from dexenv.utils.common import pathlib_file
from dexenv.utils.hand_color import dclaw_body_color_mapping
from dexenv.utils.isaac_utils import get_camera_params
from dexenv.utils.torch_utils import random_quaternions
from dexenv.utils.torch_utils import torch_long
10857
cam_focus_pt = np.array([0.08, 0, 0.15]) cam_focus_pt = gymapi.Vec3(*cam_focus_pt) cam_pos = gymapi.Vec3(*cam_pos) camera_poses = [(cam_pos, cam_focus_pt)] camera_params = get_camera_params(width=self.cfg.cam.visual_render_width, height=self.cfg.cam.visual_render_height, hov=45, cuda=False) return camera_poses, camera_params def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id): dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0) if self.cfg.env.dof_torque_on: self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.hand_start_states.append( [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z, dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z, dclaw_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL) if self.obs_type == "full_state": self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.dclaws.append(dclaw_actor) self.set_hand_color(env_ptr, dclaw_actor) def set_hand_color(self, env_ptr, dclaw_actor): rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor) for bd, bd_id in rgd_dict.items(): if bd not in dclaw_body_color_mapping: continue color = gymapi.Vec3(*dclaw_body_color_mapping[bd]) self.gym.set_rigid_body_color(env_ptr, dclaw_actor, bd_id, gymapi.MESH_VISUAL, color) def get_table_asset(self): asset_options = gymapi.AssetOptions() asset_options.armature = 0.001 asset_options.fix_base_link = True asset_options.thickness = 0.001 asset_options.disable_gravity = True table_dims = gymapi.Vec3(0.6, 0.6, 0.1) table_asset = self.gym.create_box(self.sim, table_dims.x, table_dims.y, table_dims.z, asset_options) table_props = self.gym.get_asset_rigid_shape_properties(table_asset) for p in table_props: p.friction = self.cfg.env.table.friction p.torsion_friction = self.cfg.env.table.torsion_friction p.restitution = self.cfg.env.table.restitution p.rolling_friction = self.cfg.env.table.rolling_friction self.gym.set_asset_rigid_shape_properties(table_asset, table_props) return table_asset def get_table_pose(self): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0 object_start_pose.p.y = 0 object_start_pose.p.z = -0.05 return object_start_pose def get_dclaw_start_pose(self): dclaw_start_pose = gymapi.Transform() dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx)) dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) return dclaw_start_pose def setup_torch_states(self): self.render_rgb_obs_buf = None if self.cfg.rgb_render: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0)) else: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0)) self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, 
dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = None self.update_obj_mass() self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def get_dclaw_asset(self, asset_root=None, asset_options=None): # load dclaw asset if asset_options is None: asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.override_inertia = True asset_options.override_com = True logger.info(f'VHACD:{self.cfg.env.vhacd}') if self.cfg.env.vhacd: asset_options.convex_decomposition_from_submeshes = True if self.cfg.physics_engine == "physx": # if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS if asset_root is None: asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix() robot_name = self.cfg.env.robot
class DClawBase(VecTask): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.cfg = cfg headless = self.cfg.headless self.randomize = self.cfg["task"]["randomize"] if self.randomize: logger.warning(f'Domain randomization is enabled!') self.randomization_params = self.cfg["task"]["randomization_params"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["rew"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rew"]["rotRewardScale"] self.success_tolerance = self.cfg["env"]["rew"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["rew"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["rew"]["fallDistance"] self.fall_penalty = self.cfg["env"]["rew"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rew"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.dclaw_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] # self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.object_type = self.cfg["env"]["objectType"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", "airplane": "single_objects/airplane/model.urdf", 'power_drill': 'single_objects/power_drill/model.urdf', 'mug': 'single_objects/mug/model.urdf', 'elephant': 'asymm/train/elephant/var_000/model.urdf', 'train': 'asymm/train/train/var_000/model.urdf', 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf' } self.objs_in_isaacgym = ['block', 'egg'] if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]") print("Obs type:", self.obs_type) ## TODO: change value here self.num_obs_dict = { "full_no_vel": 42, "full": 87, "full_state": 114 } self.up_axis = 'z' num_states = 0 self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = 12 self.hist_buf_reset_env_ids = None super().__init__(config=self.cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id, headless=headless) self.dt = self.sim_params.dt control_freq_inv = 
self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(0.16, -0.5, 0.5) cam_target = gymapi.Vec3(0.0, 0.0, 0.15) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) if self.obs_type == "full_state": sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dclaw_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) if self.cfg.env.dof_torque_on: self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs] self.dclaw_dof_pos = self.dclaw_dof_state[..., 0] self.dclaw_dof_vel = self.dclaw_dof_state[..., 1] if self.cfg.env.dof_torque_on: self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1) else: self.dclaw_dof_torque = None self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) if self.cfg.env.rew.pen_tb_contact: _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim) self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3) table_handle = self.gym.find_actor_handle(self.envs[0], 'table') self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0], table_handle, 'table', gymapi.DOMAIN_ENV) logger.warning(f'Table body index:{self.table_body_index}') self.table_contact_force = self.net_contact_force[:, self.table_body_index] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log( self.force_prob_range[1])) self.rb_forces = 
torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.num_actions = self.num_dclaw_dofs self.actions = self.zero_actions() DClawBase.compute_observations(self) self.num_observations = self.obs_buf.shape[-1] self.cfg.env.numObservations = self.num_observations self.create_ob_act_space() def create_sim(self): self.dt = self.cfg["sim"]["dt"] self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.1 self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() object_asset_file = self.asset_files_dict[self.object_type] dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) table_asset = self.get_table_asset() table_pose = self.get_table_pose() if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) if self.object_type in self.objs_in_isaacgym: asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix() else: asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix() object_asset_options = gymapi.AssetOptions() if self.cfg.env.vhacd: object_asset_options.convex_decomposition_from_submeshes = True object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] print(f'Fingertip handles:{self.fingertip_handles}') dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1 max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1 for i in range(self.num_envs): env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, 
dclaw_dof_props=dclaw_dof_props, env_id=i) object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block': blockscale = float(self.cfg.env.blockscale) self.gym.set_actor_scale(env_ptr, object_handle, blockscale) self.gym.set_actor_scale(env_ptr, goal_handle, blockscale) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.setup_torch_states() def create_camera(self, camera_poses, env_ptr, camera_params): cam_handles = [] for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)): camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params) if isinstance(camera_poses[ic], tuple): self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1]) else: self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic]) cam_handles.append(camera_handle) return cam_handles def get_visual_render_camera_setup(self): cam_pos = np.array([-0.7, 0, 0.5]) cam_focus_pt = np.array([0.08, 0, 0.15]) cam_focus_pt = gymapi.Vec3(*cam_focus_pt) cam_pos = gymapi.Vec3(*cam_pos) camera_poses = [(cam_pos, cam_focus_pt)] camera_params = get_camera_params(width=self.cfg.cam.visual_render_width, height=self.cfg.cam.visual_render_height, hov=45, cuda=False) return camera_poses, camera_params def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id): dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0) if self.cfg.env.dof_torque_on: self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.hand_start_states.append( [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z, dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z, dclaw_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL) if self.obs_type == "full_state": self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.dclaws.append(dclaw_actor) self.set_hand_color(env_ptr, dclaw_actor) def set_hand_color(self, env_ptr, dclaw_actor): rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, 
dclaw_actor) for bd, bd_id in rgd_dict.items(): if bd not in dclaw_body_color_mapping: continue color = gymapi.Vec3(*dclaw_body_color_mapping[bd]) self.gym.set_rigid_body_color(env_ptr, dclaw_actor, bd_id, gymapi.MESH_VISUAL, color) def get_table_asset(self): asset_options = gymapi.AssetOptions() asset_options.armature = 0.001 asset_options.fix_base_link = True asset_options.thickness = 0.001 asset_options.disable_gravity = True table_dims = gymapi.Vec3(0.6, 0.6, 0.1) table_asset = self.gym.create_box(self.sim, table_dims.x, table_dims.y, table_dims.z, asset_options) table_props = self.gym.get_asset_rigid_shape_properties(table_asset) for p in table_props: p.friction = self.cfg.env.table.friction p.torsion_friction = self.cfg.env.table.torsion_friction p.restitution = self.cfg.env.table.restitution p.rolling_friction = self.cfg.env.table.rolling_friction self.gym.set_asset_rigid_shape_properties(table_asset, table_props) return table_asset def get_table_pose(self): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0 object_start_pose.p.y = 0 object_start_pose.p.z = -0.05 return object_start_pose def get_dclaw_start_pose(self): dclaw_start_pose = gymapi.Transform() dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx)) dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) return dclaw_start_pose def setup_torch_states(self): self.render_rgb_obs_buf = None if self.cfg.rgb_render: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0)) else: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0)) self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = None self.update_obj_mass() self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def get_dclaw_asset(self, asset_root=None, asset_options=None): # load dclaw asset if asset_options is None: asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.override_inertia = True asset_options.override_com = True logger.info(f'VHACD:{self.cfg.env.vhacd}') if self.cfg.env.vhacd: asset_options.convex_decomposition_from_submeshes = True if self.cfg.physics_engine == "physx": # if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS if asset_root is None: asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix() robot_name = self.cfg.env.robot
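In the __init__ above, random_force_prob is drawn log-uniformly between the two ends of force_prob_range: a uniform sample is taken in log space and exponentiated, so per-environment probabilities are spread evenly across orders of magnitude rather than bunched near the upper bound. A small torch sketch of that sampling is below; the [0.001, 0.1] bounds mirror the forceProbRange default shown earlier but are otherwise arbitrary.

import torch

def log_uniform(low, high, size):
    """Sample uniformly in log space between `low` and `high` (both > 0)."""
    log_low, log_high = torch.log(torch.tensor(low)), torch.log(torch.tensor(high))
    return torch.exp(log_low + (log_high - log_low) * torch.rand(size))

probs = log_uniform(0.001, 0.1, (8,))
print(probs.min().item(), probs.max().item())   # both values lie within roughly [0.001, 0.1]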
asset_root = pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix()
3
2023-10-25 17:22:41+00:00
16k
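apply_randomizations in the VecTask snippet above ramps noise in over training: with a 'linear' schedule the noise parameters are scaled by min(step, schedule_steps) / schedule_steps before additive Gaussian noise is applied to observations or actions. The sketch below reproduces just that scheduling and the additive-noise step in isolation (the scaling is folded in at call time here); the tensor shape and the 0.05 noise level are illustrative values, not anything taken from the dexenv configs.

import torch

def linear_schedule_scaling(step, schedule_steps):
    """Ramp a scaling factor from 0 to 1 over `schedule_steps`, then hold it at 1."""
    return min(step, schedule_steps) / schedule_steps

def additive_gaussian_noise(tensor, mu, std, step, schedule_steps):
    """Add Gaussian noise whose mean and spread grow with the linear schedule."""
    scale = linear_schedule_scaling(step, schedule_steps)
    return tensor + torch.randn_like(tensor) * (std * scale) + mu * scale

obs = torch.zeros(4, 6)
early = additive_gaussian_noise(obs, mu=0.0, std=0.05, step=100, schedule_steps=10_000)
late = additive_gaussian_noise(obs, mu=0.0, std=0.05, step=20_000, schedule_steps=10_000)
print(early.std().item(), late.std().item())   # the noise level grows as the schedule ramps up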
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = 
input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": "IMG_FORMATS", "path": "utils/datasets.py", "snippet": "IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/datasets.py", "snippet": "VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources) as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n print(f'Found {url} locally at {file}') # file already exists\n else:\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. 
runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using 
weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "non_max_suppression_obb", "path": "utils/general.py", "snippet": "def non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results_obb\n Args:\n prediction (tensor): (b, n_all_anchors, [cx cy l s obj num_cls theta_cls])\n agnostic (bool): True = NMS will be applied between elements of different categories\n labels : () or\n\n Returns:\n list of detections, len=batch_size, on (n,7) tensor per image [xylsθ, conf, cls] θ ∈ [-pi/2, pi/2)\n \"\"\"\n\n nc = prediction.shape[2] - 5 - 180 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n class_index = nc + 5\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n max_wh = 4096 # min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 30.0 # seconds to quit after\n # redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 7), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence, (tensor): (n_conf_thres, [cx cy l s obj num_cls theta_cls])\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:class_index] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n thete_index, theta_pred = torch.max(x[:, class_index:], 1, keepdim=True) # [n_conf_thres, 1] θ ∈ int[0, 179]\n theta_pred = (theta_pred - 90) / 180 * pi # [n_conf_thres, 1] θ ∈ [-pi/2, pi/2)\n\n # Detections matrix nx7 (xyls, θ, conf, cls) θ ∈ [-pi/2, pi/2)\n if multi_label:\n i, j = (x[:, 5:class_index] > conf_thres).nonzero(as_tuple=False).T # ()\n x = torch.cat((x[i, :4], theta_pred[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:class_index].max(1, keepdim=True)\n x = torch.cat((x[:, :4], theta_pred, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 6:7] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # 
excess boxes\n x = x[x[:, 5].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 6:7] * (0 if agnostic else max_wh) # classes\n rboxes = x[:, :5].clone() \n rboxes[:, :2] = rboxes[:, :2] + c # rboxes (offset by class)\n scores = x[:, 5] # scores\n _, i = obb_nms(rboxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "scale_polys", "path": "utils/general.py", "snippet": "def scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\n # ratio_pad: [(h_raw, w_raw), (hw_ratios, wh_paddings)]\n # Rescale coords (xyxyxyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = resized / raw\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0] # h_ratios\n pad = ratio_pad[1] # wh_paddings\n\n polys[:, [0, 2, 4, 6]] -= pad[0] # x padding\n polys[:, [1, 3, 5, 7]] -= pad[1] # y padding\n polys[:, :8] /= gain # Rescale poly shape to img0_shape\n #clip_polys(polys, img0_shape)\n return polys" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = 
int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 
+ vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" } ]
import argparse
import os
import sys
import cv2
import torch
import torch.backends.cudnn as cudnn
from pathlib import Path
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords,
                           scale_polys, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
from utils.rboxs_utils import poly2rbox, rbox2poly
14097
""" FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) 
dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape)
16
2023-10-31 06:06:41+00:00
16k
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/mapping.py
[ { "identifier": "PeftModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModel(PushToHubMixin, torch.nn.Module):\n \"\"\"\n Base model encompassing various Peft methods.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.\n peft_config ([`PeftConfig`]): The configuration of the Peft model.\n\n\n **Attributes**:\n - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft.\n - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.\n - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when\n saving the model.\n - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if\n using [`PromptLearningConfig`].\n - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if\n using [`PromptLearningConfig`].\n - **transformer_backbone_name** (`str`) -- The name of the transformer\n backbone in the base model if using [`PromptLearningConfig`].\n - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone\n in the base model if using [`PromptLearningConfig`].\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__()\n self.base_model = model\n self.config = self.base_model.config\n self.modules_to_save = None\n self.peft_config = {}\n self.active_adapter = adapter_name\n self.peft_type = peft_config.peft_type\n self.base_model_torch_dtype = getattr(model, \"dtype\", None)\n if not isinstance(peft_config, PromptLearningConfig):\n self.peft_config[adapter_name] = peft_config\n self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](\n self.base_model, self.peft_config, adapter_name\n )\n else:\n self.add_adapter(adapter_name, peft_config)\n\n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n This function saves the adapter model and the adapter configuration files to a directory, so that it can be\n reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`]\n method.\n\n Args:\n save_directory (`str`):\n Directory where the adapter model and configuration files will be saved (will be created if it does not\n exist).\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the `push_to_hub` method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise ValueError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n os.makedirs(save_directory, exist_ok=True)\n\n for adapter_name, peft_config in self.peft_config.items():\n # save only the trainable weights\n output_state_dict = get_peft_model_state_dict(\n self, state_dict=kwargs.get(\"state_dict\", None), adapter_name=adapter_name\n )\n output_dir = os.path.join(save_directory, adapter_name) if adapter_name != \"default\" else save_directory\n os.makedirs(output_dir, exist_ok=True)\n torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n\n # save the config and change the inference mode to `True`\n if peft_config.base_model_name_or_path is None:\n peft_config.base_model_name_or_path = (\n self.base_model.__dict__.get(\"name_or_path\", None)\n if isinstance(peft_config, PromptLearningConfig)\n else self.base_model.model.__dict__.get(\"name_or_path\", None)\n )\n inference_mode = peft_config.inference_mode\n peft_config.inference_mode = True\n peft_config.save_pretrained(output_dir)\n 
peft_config.inference_mode = inference_mode\n\n @classmethod\n def from_pretrained(cls, model, model_id, adapter_name=\"default\", is_trainable=False, **kwargs):\n r\"\"\"\n Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights.\n\n Args:\n model ([`~transformers.PreTrainedModel`]):\n The model to be adapted. The model should be initialized with the\n [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library.\n model_id (`str` or `os.PathLike`):\n The name of the Lora configuration to use. Can be either:\n - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face\n Hub.\n - A path to a directory containing a Lora configuration file saved using the `save_pretrained`\n method (`./my_lora_config_directory/`).\n \"\"\"\n from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING\n\n # load the config\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n print(\"Config: \", config)\n\n if (getattr(model, \"hf_device_map\", None) is not None) and len(\n set(model.hf_device_map.values()).intersection({\"cpu\", \"disk\"})\n ) > 0:\n remove_hook_from_submodules(model)\n\n if isinstance(config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n config.inference_mode = not is_trainable\n\n if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():\n model = cls(model, config, adapter_name)\n else:\n model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)\n model.load_adapter(model_id, adapter_name, **kwargs)\n return model\n\n def _setup_prompt_encoder(self, adapter_name):\n config = self.peft_config[adapter_name]\n self.prompt_encoder = torch.nn.ModuleDict({})\n self.prompt_tokens = {}\n transformer_backbone = None\n for name, module in self.base_model.named_children():\n for param in module.parameters():\n param.requires_grad = False\n if isinstance(module, PreTrainedModel):\n # Make sure to freeze Tranformers model\n if transformer_backbone is None:\n transformer_backbone = module\n self.transformer_backbone_name = name\n\n if config.num_transformer_submodules is None:\n config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1\n\n for named_param, value in list(transformer_backbone.named_parameters()):\n if value.shape[0] == self.base_model.config.vocab_size:\n self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(\".weight\", \"\"))\n break\n\n if config.peft_type == PeftType.PROMPT_TUNING:\n prompt_encoder = PromptEmbedding(config, self.word_embeddings)\n elif config.peft_type == PeftType.P_TUNING:\n prompt_encoder = PromptEncoder(config)\n elif config.peft_type == PeftType.PREFIX_TUNING:\n prompt_encoder = PrefixEncoder(config)\n else:\n raise ValueError(\"Not supported\")\n self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))\n self.prompt_tokens[adapter_name] = torch.arange(\n config.num_virtual_tokens * config.num_transformer_submodules\n ).long()\n\n def get_prompt_embedding_to_save(self, adapter_name):\n \"\"\"\n Returns the prompt embedding to save when saving the model. 
Only applicable when `peft_config.peft_type !=\n PeftType.LORA`.\n \"\"\"\n prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(self.device)\n if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]\n prompt_embeddings = self.prompt_encoder[adapter_name](prompt_tokens)\n return prompt_embeddings[0].detach().cpu()\n\n def get_prompt(self, batch_size):\n \"\"\"\n Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`.\n \"\"\"\n peft_config = self.active_peft_config\n prompt_encoder = self.prompt_encoder[self.active_adapter]\n prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(self.device)\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]\n if peft_config.inference_mode:\n past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n past_key_values = prompt_encoder(prompt_tokens)\n past_key_values = past_key_values.view(\n batch_size,\n peft_config.num_virtual_tokens,\n peft_config.num_layers * 2,\n peft_config.num_attention_heads,\n peft_config.token_dim // peft_config.num_attention_heads,\n )\n if peft_config.num_transformer_submodules == 2:\n past_key_values = torch.cat([past_key_values, past_key_values], dim=2)\n past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(\n peft_config.num_transformer_submodules * 2\n )\n if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:\n post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]\n past_key_values = post_process_fn(past_key_values)\n return past_key_values\n else:\n if peft_config.inference_mode:\n prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n prompts = prompt_encoder(prompt_tokens)\n return prompts\n\n def print_trainable_parameters(self):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in self.named_parameters():\n num_params = param.numel()\n # if using DS Zero 3 and the weights are initialized empty\n if num_params == 0 and hasattr(param, \"ds_numel\"):\n num_params = param.ds_numel\n\n all_param += num_params\n if param.requires_grad:\n trainable_params += num_params\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.base_model, name)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Forward pass of the model.\n \"\"\"\n return self.get_base_model()(*args, **kwargs)\n\n @contextmanager\n def disable_adapter(self):\n \"\"\"\n Disables the adapter module.\n \"\"\"\n if isinstance(self.active_peft_config, PromptLearningConfig):\n old_forward = self.forward\n self.forward = self.base_model.forward\n else:\n self.base_model.disable_adapter_layers()\n yield\n if isinstance(self.active_peft_config, PromptLearningConfig):\n self.forward = old_forward\n else:\n self.base_model.enable_adapter_layers()\n\n def get_base_model(self):\n \"\"\"\n Returns the base model.\n \"\"\"\n return self.base_model if 
isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model\n\n def add_adapter(self, adapter_name, peft_config):\n if peft_config.peft_type != self.peft_type:\n raise ValueError(\n f\"Cannot combine adapters with different peft types. \"\n f\"Found {self.peft_type} and {peft_config.peft_type}.\"\n )\n self.peft_config[adapter_name] = peft_config\n if isinstance(peft_config, PromptLearningConfig):\n self._setup_prompt_encoder(adapter_name)\n else:\n self.base_model.add_adapter(adapter_name, peft_config)\n if getattr(peft_config, \"modules_to_save\", None) is not None:\n if self.modules_to_save is None:\n self.modules_to_save = set(peft_config.modules_to_save)\n else:\n self.modules_to_save = self.modules_to_save.update(peft_config.modules_to_save)\n _set_trainable(self, adapter_name)\n\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING\n\n if adapter_name not in self.peft_config:\n # load the config\n peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n if isinstance(peft_config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n peft_config.inference_mode = not is_trainable\n self.add_adapter(adapter_name, peft_config)\n\n # load weights if any\n path = os.path.join(model_id, kwargs[\"subfolder\"]) if kwargs.get(\"subfolder\", None) is not None else model_id\n print(\"Load from adapter:\", WEIGHTS_NAME)\n if os.path.exists(os.path.join(path, WEIGHTS_NAME)):\n filename = os.path.join(path, WEIGHTS_NAME)\n else:\n try:\n filename = hf_hub_download(model_id, WEIGHTS_NAME, subfolder=kwargs.get(\"subfolder\", None))\n except: # noqa\n raise ValueError(\n f\"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. 
\"\n f\"Please check that the file {WEIGHTS_NAME} is present at {model_id}.\"\n )\n\n adapters_weights = torch.load(\n filename, map_location=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n )\n # load the weights into the model\n set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)\n if (\n (getattr(self, \"hf_device_map\", None) is not None)\n and (len(set(self.hf_device_map.values()).intersection({\"cpu\", \"disk\"})) > 0)\n and len(self.peft_config) == 1\n ):\n device_map = kwargs.get(\"device_map\", \"auto\")\n max_memory = kwargs.get(\"max_memory\", None)\n offload_dir = kwargs.get(\"offload_folder\", None)\n offload_index = kwargs.get(\"offload_index\", None)\n\n dispatch_model_kwargs = {}\n # Safety checker for previous `accelerate` versions\n # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/\n if \"offload_index\" in inspect.signature(dispatch_model).parameters:\n dispatch_model_kwargs[\"offload_index\"] = offload_index\n\n no_split_module_classes = self._no_split_modules\n\n if device_map != \"sequential\":\n max_memory = get_balanced_memory(\n self,\n max_memory=max_memory,\n no_split_module_classes=no_split_module_classes,\n low_zero=(device_map == \"balanced_low_0\"),\n )\n if isinstance(device_map, str):\n device_map = infer_auto_device_map(\n self, max_memory=max_memory, no_split_module_classes=no_split_module_classes\n )\n dispatch_model(\n self,\n device_map=device_map,\n offload_dir=offload_dir,\n **dispatch_model_kwargs,\n )\n hook = AlignDevicesHook(io_same_device=True)\n if isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n remove_hook_from_submodules(self.prompt_encoder)\n add_hook_to_module(self.get_base_model(), hook)\n\n # Set model in evaluation mode to deactivate Dropout modules by default\n self.eval()\n\n def set_adapter(self, adapter_name):\n \"\"\"\n Sets the active adapter.\n \"\"\"\n if adapter_name not in self.peft_config:\n raise ValueError(f\"Adapter {adapter_name} not found.\")\n self.active_adapter = adapter_name\n if not isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n self.base_model.set_adapter(adapter_name)\n _set_adapter(self, adapter_name)\n\n @property\n def active_peft_config(self):\n return self.peft_config[self.active_adapter]" }, { "identifier": "PeftModelForCausalLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n # For gpt2 models, we construct postion_ids on the fly by using attention mask, and position ids need to match input_shape.\n # for prefix tuning, input shape is determined using `input_ids`. Thus we should not expand 'attention_mask' here\n # for prompt tuning input_ids is not passed but a concatenated input_embeds is passed. 
Thus attention_mask needs to be of same size of num_virtual_tokens + input_ids\n if kwargs.get(\"attention_mask\", None) is not None and peft_config.peft_type in [\n PeftType.PROMPT_TUNING,\n PeftType.P_TUNING,\n ]:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(\n kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(kwargs[\"input_ids\"].device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, kwargs[\"attention_mask\"]), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if isinstance(peft_config, PromptLearningConfig):\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype)\n for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs" }, { "identifier": "PeftModelForSeq2SeqLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSeq2SeqLM(PeftModel):\n \"\"\"\n Peft model for sequence-to-sequence language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM\n >>> from peft import PeftModelForSeq2SeqLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"LORA\",\n ... \"task_type\": \"SEQ_2_SEQ_LM\",\n ... \"inference_mode\": False,\n ... \"r\": 8,\n ... \"target_modules\": [\"q\", \"v\"],\n ... \"lora_alpha\": 32,\n ... 
\"lora_dropout\": 0.1,\n ... \"merge_weights\": False,\n ... \"fan_in_fan_out\": False,\n ... \"enable_lora\": None,\n ... \"bias\": \"none\",\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n self.base_model_prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model._prepare_encoder_decoder_kwargs_for_generation\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n decoder_inputs_embeds=decoder_inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if decoder_attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n if decoder_inputs_embeds is None and decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)\n\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n # concat prompt labels\n if labels is not None:\n if peft_config.num_transformer_submodules == 1:\n kwargs[\"labels\"] = labels\n elif peft_config.num_transformer_submodules == 2:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)\n if peft_config.num_transformer_submodules == 1:\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n elif peft_config.num_transformer_submodules == 2:\n decoder_inputs_embeds = torch.cat(\n (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1\n )\n return self.base_model(\n inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs\n )\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self._prepare_encoder_decoder_kwargs_for_generation\n )\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n outputs = self.base_model.generate(**kwargs)\n else:\n raise NotImplementedError\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n batch_size = model_kwargs[\"decoder_input_ids\"].shape[0]\n past_key_values = self.get_prompt(batch_size)\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n model_kwargs[\"past_key_values\"] = past_key_values\n\n return model_kwargs" }, { "identifier": "PeftModelForSequenceClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSequenceClassification(PeftModel):\n \"\"\"\n Peft model for sequence classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForSequenceClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"SEQ_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForSequenceClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n pooled_output = self.base_model.dropout(pooled_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.base_model.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.base_model.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { 
"identifier": "PeftModelForTokenClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForTokenClassification(PeftModel):\n \"\"\"\n Peft model for token classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForTokenClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"TOKEN_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForTokenClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForTokenClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig = None, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n sequence_output = outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n sequence_output = self.base_model.dropout(sequence_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)\n\n loss = None\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { "identifier": "LoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. 
Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA" }, { "identifier": "AdaLoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time internval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for undertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Intial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" }, { 
"identifier": "PromptEncoderConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/p_tuning.py", "snippet": "class PromptEncoderConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEncoder`].\n\n Args:\n encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):\n The type of reparameterization to use.\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n encoder_num_layers (`int`): The number of layers of the prompt encoder.\n encoder_dropout (`float`): The dropout probability of the prompt encoder.\n \"\"\"\n\n encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(\n default=PromptEncoderReparameterizationType.MLP,\n metadata={\"help\": \"How to reparameterize the prompt encoder\"},\n )\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the prompt encoder\"},\n )\n encoder_num_layers: int = field(\n default=2,\n metadata={\"help\": \"The number of layers of the prompt encoder\"},\n )\n encoder_dropout: float = field(\n default=0.0,\n metadata={\"help\": \"The dropout of the prompt encoder\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.P_TUNING" }, { "identifier": "PrefixTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prefix_tuning.py", "snippet": "class PrefixTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PrefixEncoder`].\n\n Args:\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n prefix_projection (`bool`): Whether to project the prefix embeddings.\n \"\"\"\n\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the encoder\"},\n )\n prefix_projection: bool = field(\n default=False,\n metadata={\"help\": \"Whether to project the prefix tokens\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PREFIX_TUNING" }, { "identifier": "PromptTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prompt_tuning.py", "snippet": "class PromptTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEmbedding`].\n\n Args:\n prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.\n prompt_tuning_init_text (`str`, *optional*):\n The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.\n tokenizer_name_or_path (`str`, *optional*):\n The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.\n \"\"\"\n\n prompt_tuning_init: Union[PromptTuningInit, str] = field(\n default=PromptTuningInit.RANDOM,\n metadata={\"help\": \"How to initialize the prompt tuning parameters\"},\n )\n prompt_tuning_init_text: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The tokenizer to use for prompt tuning initialization. 
Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PROMPT_TUNING" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" } ]
from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import AdaLoraConfig, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig from .utils import PromptLearningConfig
14279
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig,
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig,
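The module excerpt above defines two lookup tables: MODEL_TYPE_TO_PEFT_MODEL_MAPPING (task-type string to PeftModel subclass) and PEFT_TYPE_TO_CONFIG_MAPPING (PEFT-method string to config class). A hedged sketch of how such mappings are typically consumed follows; only the two dicts come from the source, while the helper name `build_peft_model` and its signature are hypothetical.

```py
# Hypothetical dispatcher: resolve the config class from "peft_type", then pick the
# task-specific PeftModel wrapper via "task_type". This mirrors the usual
# get_peft_config / get_peft_model pattern, but it is only an illustrative sketch,
# not this project's actual API.
def build_peft_model(base_model, config_dict: dict, adapter_name: str = "default"):
    config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]]
    peft_config = config_cls(**config_dict)  # remaining keys must match the config dataclass fields

    model_cls = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type]  # e.g. "CAUSAL_LM"
    return model_cls(base_model, peft_config, adapter_name=adapter_name)
```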
"PREFIX_TUNING": PrefixTuningConfig,
8
2023-10-30 10:50:32+00:00
16k
chenran-li/RQL-release
stable_baselines3/a2c/a2c.py
[ { "identifier": "OnPolicyAlgorithm", "path": "stable_baselines3/common/on_policy_algorithm.py", "snippet": "class OnPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for On-Policy algorithms (ex: A2C/PPO).\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param n_steps: The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param gamma: Discount factor\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.\n Equivalent to classic advantage when set to 1.\n :param ent_coef: Entropy coefficient for the loss calculation\n :param vf_coef: Value function coefficient for the loss calculation\n :param max_grad_norm: The maximum value for the gradient clipping\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[ActorCriticPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n n_steps: int,\n gamma: float,\n gae_lambda: float,\n ent_coef: float,\n vf_coef: float,\n max_grad_norm: float,\n use_sde: bool,\n sde_sample_freq: int,\n tensorboard_log: Optional[str] = None,\n monitor_wrapper: bool = True,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n device=device,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n support_multi_env=True,\n seed=seed,\n tensorboard_log=tensorboard_log,\n supported_action_spaces=supported_action_spaces,\n )\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.gae_lambda = gae_lambda\n self.ent_coef = ent_coef\n self.vf_coef = vf_coef\n self.max_grad_norm = max_grad_norm\n self.rollout_buffer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, spaces.Dict) else RolloutBuffer\n\n self.rollout_buffer = buffer_cls(\n self.n_steps,\n self.observation_space,\n self.action_space,\n device=self.device,\n gamma=self.gamma,\n gae_lambda=self.gae_lambda,\n n_envs=self.n_envs,\n )\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n self.lr_schedule,\n use_sde=self.use_sde,\n **self.policy_kwargs # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n rollout_buffer: RolloutBuffer,\n n_rollout_steps: int,\n ) -> bool:\n \"\"\"\n Collect experiences using the current policy and fill a ``RolloutBuffer``.\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param rollout_buffer: Buffer to fill with rollouts\n :param n_rollout_steps: Number of experiences to collect per environment\n :return: True if function returned with at least `n_rollout_steps`\n collected, False if callback terminated rollout prematurely.\n \"\"\"\n assert self._last_obs is not None, \"No previous observation was provided\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n n_steps = 0\n rollout_buffer.reset()\n # Sample new weights for the state dependent exploration\n if self.use_sde:\n self.policy.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n\n while n_steps < n_rollout_steps:\n if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.policy.reset_noise(env.num_envs)\n\n with th.no_grad():\n # Convert to pytorch 
tensor or to TensorDict\n obs_tensor = obs_as_tensor(self._last_obs, self.device)\n actions, values, log_probs = self.policy(obs_tensor)\n actions = actions.cpu().numpy()\n\n # Rescale and perform action\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.action_space, spaces.Box):\n clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n new_obs, rewards, dones, infos = env.step(clipped_actions)\n\n self.num_timesteps += env.num_envs\n\n # Give access to local variables\n callback.update_locals(locals())\n if callback.on_step() is False:\n return False\n\n self._update_info_buffer(infos)\n n_steps += 1\n\n if isinstance(self.action_space, spaces.Discrete):\n # Reshape in case of discrete action\n actions = actions.reshape(-1, 1)\n\n # Handle timeout by bootstraping with value function\n # see GitHub issue #633\n for idx, done in enumerate(dones):\n if (\n done\n and infos[idx].get(\"terminal_observation\") is not None\n and infos[idx].get(\"TimeLimit.truncated\", False)\n ):\n terminal_obs = self.policy.obs_to_tensor(infos[idx][\"terminal_observation\"])[0]\n with th.no_grad():\n terminal_value = self.policy.predict_values(terminal_obs)[0]\n rewards[idx] += self.gamma * terminal_value\n\n rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)\n self._last_obs = new_obs\n self._last_episode_starts = dones\n\n with th.no_grad():\n # Compute value for the last timestep\n values = self.policy.predict_values(obs_as_tensor(new_obs, self.device))\n\n rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)\n\n callback.on_rollout_end()\n\n return True\n\n def train(self) -> None:\n \"\"\"\n Consume current rollout data and update policy parameters.\n Implemented by individual algorithms.\n \"\"\"\n raise NotImplementedError\n\n def learn(\n self: SelfOnPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 1,\n tb_log_name: str = \"OnPolicyAlgorithm\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfOnPolicyAlgorithm:\n iteration = 0\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n\n continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)\n\n if continue_training is False:\n break\n\n iteration += 1\n self._update_current_progress_remaining(self.num_timesteps, total_timesteps)\n\n # Display training infos\n if log_interval is not None and iteration % log_interval == 0:\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/iterations\", iteration, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n 
self.logger.dump(step=self.num_timesteps)\n\n self.train()\n\n callback.on_training_end()\n\n return self\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []" }, { "identifier": "ActorCriticCnnPolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class ActorCriticCnnPolicy(ActorCriticPolicy):\n \"\"\"\n CNN policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n share_features_extractor,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )" }, { "identifier": "ActorCriticPolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class ActorCriticPolicy(BasePolicy):\n \"\"\"\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be 
constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n # TODO(antonin): update type annotation when we remove shared network support\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n # Small values to avoid NaN in Adam optimizer\n if optimizer_class == th.optim.Adam:\n optimizer_kwargs[\"eps\"] = 1e-5\n\n super().__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=squash_output,\n normalize_images=normalize_images,\n )\n\n # Convert [dict()] to dict() as shared network are deprecated\n if isinstance(net_arch, list) and len(net_arch) > 0:\n if isinstance(net_arch[0], dict):\n warnings.warn(\n (\n \"As shared layers in the mlp_extractor are deprecated and will be removed in SB3 v1.8.0, \"\n \"you should now pass directly a dictionary and not a list \"\n \"(net_arch=dict(pi=..., vf=...) 
instead of net_arch=[dict(pi=..., vf=...)])\"\n ),\n )\n net_arch = net_arch[0]\n else:\n # Note: deprecation warning will be emitted\n # by the MlpExtractor constructor\n pass\n\n # Default network architecture, from stable-baselines\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = dict(pi=[64, 64], vf=[64, 64])\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.ortho_init = ortho_init\n\n self.share_features_extractor = share_features_extractor\n self.features_extractor = self.make_features_extractor()\n self.features_dim = self.features_extractor.features_dim\n if self.share_features_extractor:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.features_extractor\n else:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.make_features_extractor()\n # if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\n # TODO(antonin): update the check once we change net_arch behavior\n if isinstance(net_arch, list) and len(net_arch) > 0:\n raise ValueError(\n \"Error: if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\"\n )\n\n self.log_std_init = log_std_init\n dist_kwargs = None\n # Keyword arguments for gSDE distribution\n if use_sde:\n dist_kwargs = {\n \"full_std\": full_std,\n \"squash_output\": squash_output,\n \"use_expln\": use_expln,\n \"learn_features\": False,\n }\n\n self.use_sde = use_sde\n self.dist_kwargs = dist_kwargs\n\n # Action distribution\n self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)\n\n self._build(lr_schedule)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n squash_output=default_none_kwargs[\"squash_output\"],\n full_std=default_none_kwargs[\"full_std\"],\n use_expln=default_none_kwargs[\"use_expln\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n ortho_init=self.ortho_init,\n optimizer_class=self.optimizer_class,\n optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, n_envs: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix.\n\n :param n_envs:\n \"\"\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), \"reset_noise() is only available when using gSDE\"\n self.action_dist.sample_weights(self.log_std, batch_size=n_envs)\n\n def _build_mlp_extractor(self) -> None:\n \"\"\"\n Create the policy and value networks.\n Part of the layers can be shared.\n \"\"\"\n # Note: If net_arch is None and some features extractor is used,\n # net_arch here is an empty list and mlp_extractor does not\n # really contain any layers (acts like an identity module).\n self.mlp_extractor = MlpExtractor(\n self.features_dim,\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n device=self.device,\n )\n\n def _build(self, lr_schedule: Schedule) -> None:\n \"\"\"\n Create the networks and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the 
initial learning rate\n \"\"\"\n self._build_mlp_extractor()\n\n latent_dim_pi = self.mlp_extractor.latent_dim_pi\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):\n self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n else:\n raise NotImplementedError(f\"Unsupported distribution '{self.action_dist}'.\")\n\n self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n # Init weights: use orthogonal initialization\n # with small initial weight for the output\n if self.ortho_init:\n # TODO: check for features_extractor\n # Values from stable-baselines.\n # features_extractor/mlp values are\n # originally from openai/baselines (default gains/init_scales).\n module_gains = {\n self.features_extractor: np.sqrt(2),\n self.mlp_extractor: np.sqrt(2),\n self.action_net: 0.01,\n self.value_net: 1,\n }\n if not self.share_features_extractor:\n # Note(antonin): this is to keep SB3 results\n # consistent, see GH#1148\n del module_gains[self.features_extractor]\n module_gains[self.pi_features_extractor] = np.sqrt(2)\n module_gains[self.vf_features_extractor] = np.sqrt(2)\n\n for module, gain in module_gains.items():\n module.apply(partial(self.init_weights, gain=gain))\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n\n :param obs: Observation\n :param deterministic: Whether to sample or use deterministic actions\n :return: action, value and log probability of the action\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n # Evaluate the values for the given observations\n values = self.value_net(latent_vf)\n distribution = self._get_action_dist_from_latent(latent_pi)\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n actions = actions.reshape((-1,) + self.action_space.shape)\n return actions, values, log_prob\n\n def extract_features(self, obs: th.Tensor) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:\n \"\"\"\n Preprocess the observation if needed and extract features.\n\n :param obs: Observation\n :return: the output of the features extractor(s)\n \"\"\"\n if self.share_features_extractor:\n return super().extract_features(obs, self.features_extractor)\n else:\n pi_features = super().extract_features(obs, self.pi_features_extractor)\n vf_features = super().extract_features(obs, self.vf_features_extractor)\n return pi_features, vf_features\n\n def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:\n \"\"\"\n Retrieve 
action distribution given the latent codes.\n\n :param latent_pi: Latent code for the actor\n :return: Action distribution\n \"\"\"\n mean_actions = self.action_net(latent_pi)\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std)\n elif isinstance(self.action_dist, CategoricalDistribution):\n # Here mean_actions are the logits before the softmax\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, MultiCategoricalDistribution):\n # Here mean_actions are the flattened logits\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, BernoulliDistribution):\n # Here mean_actions are the logits (before rounding to get the binary actions)\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)\n else:\n raise ValueError(\"Invalid action distribution\")\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n return self.get_distribution(observation).get_actions(deterministic=deterministic)\n\n def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Optional[th.Tensor]]:\n \"\"\"\n Evaluate actions according to the current policy,\n given the observations.\n\n :param obs: Observation\n :param actions: Actions\n :return: estimated value, log likelihood of taking those actions\n and entropy of the action distribution.\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n entropy = distribution.entropy()\n return values, log_prob, entropy\n\n def get_distribution(self, obs: th.Tensor) -> Distribution:\n \"\"\"\n Get the current policy distribution given the observations.\n\n :param obs:\n :return: the action distribution.\n \"\"\"\n features = super().extract_features(obs, self.pi_features_extractor)\n latent_pi = self.mlp_extractor.forward_actor(features)\n return self._get_action_dist_from_latent(latent_pi)\n\n def predict_values(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the estimated values according to the current policy given the observations.\n\n :param obs: Observation\n :return: the estimated values.\n \"\"\"\n features = super().extract_features(obs, self.vf_features_extractor)\n latent_vf = self.mlp_extractor.forward_critic(features)\n return self.value_net(latent_vf)" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword 
arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "MultiInputActorCriticPolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class MultiInputActorCriticPolicy(ActorCriticPolicy):\n \"\"\"\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n share_features_extractor,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "explained_variance", "path": "stable_baselines3/common/utils.py", "snippet": "def explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: the prediction\n :param y_true: the expected value\n :return: explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 1 and y_pred.ndim == 1\n var_y = np.var(y_true)\n return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y" } ]
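The `BasePolicy` snippet in the context above rescales actions between the environment's `[low, high]` box and `[-1, 1]`. A minimal standalone sketch of that round trip, using made-up bounds rather than anything taken from this record:

import numpy as np

# Hypothetical action bounds, for illustration only.
low, high = np.array([-2.0, 0.0]), np.array([2.0, 1.0])

def scale_action(action: np.ndarray) -> np.ndarray:
    # [low, high] -> [-1, 1], same formula as BasePolicy.scale_action quoted above.
    return 2.0 * ((action - low) / (high - low)) - 1.0

def unscale_action(scaled_action: np.ndarray) -> np.ndarray:
    # [-1, 1] -> [low, high], same formula as BasePolicy.unscale_action quoted above.
    return low + (0.5 * (scaled_action + 1.0) * (high - low))

action = np.array([1.0, 0.25])
assert np.allclose(unscale_action(scale_action(action)), action)  # round trip is lossless

As the `predict` snippet shows, this inverse mapping is only used when the output is squashed; otherwise the raw actions are simply clipped to the box bounds.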
from typing import Any, Dict, Optional, Type, TypeVar, Union
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance
import torch as th
10,962
:param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": ActorCriticPolicy, "CnnPolicy": ActorCriticCnnPolicy, "MultiInputPolicy": MultiInputActorCriticPolicy, } def __init__( self, policy: Union[str, Type[ActorCriticPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 7e-4, n_steps: int = 5, gamma: float = 0.99, gae_lambda: float = 1.0, ent_coef: float = 0.0, vf_coef: float = 0.5, max_grad_norm: float = 0.5, rms_prop_eps: float = 1e-5, use_rms_prop: bool = True, use_sde: bool = False, sde_sample_freq: int = -1, normalize_advantage: bool = False, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate=learning_rate, n_steps=n_steps, gamma=gamma, gae_lambda=gae_lambda, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, use_sde=use_sde, sde_sample_freq=sde_sample_freq, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, seed=seed, _init_setup_model=False, supported_action_spaces=( spaces.Box, spaces.Discrete, spaces.MultiDiscrete, spaces.MultiBinary, ), ) self.normalize_advantage = normalize_advantage # Update optimizer inside the policy if we want to use RMSProp # (original implementation) rather than Adam if use_rms_prop and "optimizer_class" not in self.policy_kwargs: self.policy_kwargs["optimizer_class"] = th.optim.RMSprop self.policy_kwargs["optimizer_kwargs"] = dict(alpha=0.99, eps=rms_prop_eps, weight_decay=0) if _init_setup_model: self._setup_model() def train(self) -> None: """ Update policy using the currently gathered rollout buffer (one gradient step over whole data). """ # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizer learning rate self._update_learning_rate(self.policy.optimizer) # This will only loop once (get all data in one go) for rollout_data in self.rollout_buffer.get(batch_size=None): actions = rollout_data.actions if isinstance(self.action_space, spaces.Discrete): # Convert discrete action from float to long actions = actions.long().flatten() values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions) values = values.flatten() # Normalize advantage (not present in the original implementation) advantages = rollout_data.advantages if self.normalize_advantage: advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) # Policy gradient loss policy_loss = -(advantages * log_prob).mean() # Value loss using the TD(gae_lambda) target value_loss = F.mse_loss(rollout_data.returns, values) # Entropy loss favor exploration if entropy is None: # Approximate entropy when no analytical form entropy_loss = -th.mean(-log_prob) else: entropy_loss = -th.mean(entropy) loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss # Optimization step self.policy.optimizer.zero_grad() loss.backward() # Clip grad norm th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) self.policy.optimizer.step()
SelfA2C = TypeVar("SelfA2C", bound="A2C") class A2C(OnPolicyAlgorithm): """ Advantage Actor Critic (A2C) Paper: https://arxiv.org/abs/1602.01783 Code: This implementation borrows code from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and and Stable Baselines (https://github.com/hill-a/stable-baselines) Introduction to A2C: https://hackernoon.com/intuitive-rl-intro-to-advantage-actor-critic-a2c-4ff545978752 :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: The learning rate, it can be a function of the current progress remaining (from 1 to 0) :param n_steps: The number of steps to run for each environment per update (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel) :param gamma: Discount factor :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator Equivalent to classic advantage when set to 1. :param ent_coef: Entropy coefficient for the loss calculation :param vf_coef: Value function coefficient for the loss calculation :param max_grad_norm: The maximum value for the gradient clipping :param rms_prop_eps: RMSProp epsilon. It stabilizes square root computation in denominator of RMSProp update :param use_rms_prop: Whether to use RMSprop (default) or Adam as optimizer :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param normalize_advantage: Whether to normalize or not the advantage :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. 
:param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": ActorCriticPolicy, "CnnPolicy": ActorCriticCnnPolicy, "MultiInputPolicy": MultiInputActorCriticPolicy, } def __init__( self, policy: Union[str, Type[ActorCriticPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 7e-4, n_steps: int = 5, gamma: float = 0.99, gae_lambda: float = 1.0, ent_coef: float = 0.0, vf_coef: float = 0.5, max_grad_norm: float = 0.5, rms_prop_eps: float = 1e-5, use_rms_prop: bool = True, use_sde: bool = False, sde_sample_freq: int = -1, normalize_advantage: bool = False, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate=learning_rate, n_steps=n_steps, gamma=gamma, gae_lambda=gae_lambda, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, use_sde=use_sde, sde_sample_freq=sde_sample_freq, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, seed=seed, _init_setup_model=False, supported_action_spaces=( spaces.Box, spaces.Discrete, spaces.MultiDiscrete, spaces.MultiBinary, ), ) self.normalize_advantage = normalize_advantage # Update optimizer inside the policy if we want to use RMSProp # (original implementation) rather than Adam if use_rms_prop and "optimizer_class" not in self.policy_kwargs: self.policy_kwargs["optimizer_class"] = th.optim.RMSprop self.policy_kwargs["optimizer_kwargs"] = dict(alpha=0.99, eps=rms_prop_eps, weight_decay=0) if _init_setup_model: self._setup_model() def train(self) -> None: """ Update policy using the currently gathered rollout buffer (one gradient step over whole data). """ # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizer learning rate self._update_learning_rate(self.policy.optimizer) # This will only loop once (get all data in one go) for rollout_data in self.rollout_buffer.get(batch_size=None): actions = rollout_data.actions if isinstance(self.action_space, spaces.Discrete): # Convert discrete action from float to long actions = actions.long().flatten() values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions) values = values.flatten() # Normalize advantage (not present in the original implementation) advantages = rollout_data.advantages if self.normalize_advantage: advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) # Policy gradient loss policy_loss = -(advantages * log_prob).mean() # Value loss using the TD(gae_lambda) target value_loss = F.mse_loss(rollout_data.returns, values) # Entropy loss favor exploration if entropy is None: # Approximate entropy when no analytical form entropy_loss = -th.mean(-log_prob) else: entropy_loss = -th.mean(entropy) loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss # Optimization step self.policy.optimizer.zero_grad() loss.backward() # Clip grad norm th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) self.policy.optimizer.step()
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
6
2023-10-28 01:09:21+00:00
16k
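For this record, the gold `next_line` computes the explained variance of the critic over the rollout buffer. Below is a self-contained sketch of that metric, following the `explained_variance` helper quoted in the record's context; the two arrays are placeholder values, not real rollout data:

import numpy as np

def explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> float:
    # Returns 1 - Var[y_true - y_pred] / Var[y_true]; NaN if the targets have zero variance.
    assert y_true.ndim == 1 and y_pred.ndim == 1
    var_y = np.var(y_true)
    return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

# Placeholders standing in for rollout_buffer.values.flatten() and rollout_buffer.returns.flatten().
values = np.array([0.9, 1.8, 3.2, 4.1])
returns = np.array([1.0, 2.0, 3.0, 4.0])
print(explained_variance(values, returns))  # ~0.98: the value head tracks the returns closely

A result near 1 means the value function explains most of the variance in the empirical returns; values at or below 0 mean it does no better than predicting zero.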
zyang1580/CoLLM
minigpt4/runners/runner_base_rec.py
[ { "identifier": "download_cached_file", "path": "minigpt4/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()" }, { "identifier": "get_rank", "path": "minigpt4/common/dist_utils.py", "snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()" }, { "identifier": "get_world_size", "path": "minigpt4/common/dist_utils.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "is_main_process", "path": "minigpt4/common/dist_utils.py", "snippet": "def is_main_process():\n return get_rank() == 0" }, { "identifier": "main_process", "path": "minigpt4/common/dist_utils.py", "snippet": "def main_process(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper" }, { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "is_url", "path": "minigpt4/common/utils.py", "snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")" }, { "identifier": "concat_datasets", "path": "minigpt4/datasets/data_utils.py", "snippet": "def concat_datasets(datasets):\n \"\"\"\n Concatenates multiple datasets into a single dataset.\n\n It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support\n generic IterableDataset because it requires creating separate samplers.\n\n Now only supports conctenating training datasets and assuming validation and testing\n have only a single dataset. 
This is because metrics should not be computed on the concatenated\n datasets.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by split.\n\n Returns:\n Dict of concatenated datasets by split, \"train\" is the concatenation of multiple datasets,\n \"val\" and \"test\" remain the same.\n\n If the input training datasets contain both map-style and DataPipeline datasets, returns\n a tuple, where the first element is a concatenated map-style dataset and the second\n element is a chained DataPipeline dataset.\n\n \"\"\"\n # concatenate datasets in the same split\n for split_name in datasets:\n if split_name != \"train\":\n assert (\n len(datasets[split_name]) == 1\n ), \"Do not support multiple {} datasets.\".format(split_name)\n datasets[split_name] = datasets[split_name][0]\n else:\n iterable_datasets, map_datasets = [], []\n for dataset in datasets[split_name]:\n if isinstance(dataset, wds.DataPipeline):\n logging.info(\n \"Dataset {} is IterableDataset, can't be concatenated.\".format(\n dataset\n )\n )\n iterable_datasets.append(dataset)\n elif isinstance(dataset, IterableDataset):\n raise NotImplementedError(\n \"Do not support concatenation of generic IterableDataset.\"\n )\n else:\n map_datasets.append(dataset)\n\n # if len(iterable_datasets) > 0:\n # concatenate map-style datasets and iterable-style datasets separately\n if len(iterable_datasets) > 1:\n chained_datasets = (\n ChainDataset(iterable_datasets)\n )\n elif len(iterable_datasets) == 1:\n chained_datasets = iterable_datasets[0]\n else:\n chained_datasets = None\n\n concat_datasets = (\n ConcatDataset(map_datasets) if len(map_datasets) > 0 else None\n )\n\n train_datasets = concat_datasets, chained_datasets\n train_datasets = tuple([x for x in train_datasets if x is not None])\n train_datasets = (\n train_datasets[0] if len(train_datasets) == 1 else train_datasets\n )\n\n datasets[split_name] = train_datasets\n\n return datasets" }, { "identifier": "reorg_datasets_by_split", "path": "minigpt4/datasets/data_utils.py", "snippet": "def reorg_datasets_by_split(datasets):\n \"\"\"\n Organizes datasets by split.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by name.\n\n Returns:\n Dict of datasets by split {split_name: List[Datasets]}.\n \"\"\"\n # if len(datasets) == 1:\n # return datasets[list(datasets.keys())[0]]\n # else:\n reorg_datasets = dict()\n\n # reorganize by split\n for _, dataset in datasets.items():\n for split_name, dataset_split in dataset.items():\n if split_name not in reorg_datasets:\n reorg_datasets[split_name] = [dataset_split]\n else:\n reorg_datasets[split_name].append(dataset_split)\n\n return reorg_datasets" }, { "identifier": "ChainDataset", "path": "minigpt4/datasets/data_utils.py", "snippet": "class ChainDataset(wds.DataPipeline):\n r\"\"\"Dataset for chaining multiple :class:`DataPipeline` s.\n\n This class is useful to assemble different existing dataset streams. 
The\n chaining operation is done on-the-fly, so concatenating large-scale\n datasets with this class will be efficient.\n\n Args:\n datasets (iterable of IterableDataset): datasets to be chained together\n \"\"\"\n def __init__(self, datasets: List[wds.DataPipeline]) -> None:\n super().__init__()\n self.datasets = datasets\n self.prob = []\n self.names = []\n for dataset in self.datasets:\n if hasattr(dataset, 'name'):\n self.names.append(dataset.name)\n else:\n self.names.append('Unknown')\n if hasattr(dataset, 'sample_ratio'):\n self.prob.append(dataset.sample_ratio)\n else:\n self.prob.append(1)\n logging.info(\"One of the datapipeline doesn't define ratio and set to 1 automatically.\")\n\n def __iter__(self):\n datastreams = [iter(dataset) for dataset in self.datasets]\n while True:\n select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0]\n yield next(select_datastream)" }, { "identifier": "IterLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class IterLoader:\n \"\"\"\n A wrapper to convert DataLoader as an infinite iterator.\n\n Modified from:\n https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py\n \"\"\"\n\n def __init__(self, dataloader: DataLoader, use_distributed: bool = False):\n self._dataloader = dataloader\n self.iter_loader = iter(self._dataloader)\n self._use_distributed = use_distributed\n self._epoch = 0\n\n @property\n def epoch(self) -> int:\n return self._epoch\n\n def __next__(self):\n try:\n data = next(self.iter_loader)\n except StopIteration:\n self._epoch += 1\n if hasattr(self._dataloader.sampler, \"set_epoch\") and self._use_distributed:\n self._dataloader.sampler.set_epoch(self._epoch)\n time.sleep(2) # Prevent possible deadlock during epoch transition\n self.iter_loader = iter(self._dataloader)\n data = next(self.iter_loader)\n\n return data\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return len(self._dataloader)" }, { "identifier": "MultiIterLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class MultiIterLoader:\n \"\"\"\n A simple wrapper for iterating over multiple iterators.\n\n Args:\n loaders (List[Loader]): List of Iterator loaders.\n ratios (List[float]): List of ratios to sample from each loader. 
If None, all loaders are sampled uniformly.\n \"\"\"\n\n def __init__(self, loaders, ratios=None):\n # assert all loaders has __next__ method\n # self.nums = []\n for loader in loaders:\n assert hasattr(\n loader, \"__next__\"\n ), \"Loader {} has no __next__ method.\".format(loader)\n #self.nums.extend(len(loader))\n\n if ratios is None:\n ratios = [1.0] * len(loaders)\n else:\n assert len(ratios) == len(loaders)\n ratios = [float(ratio) / sum(ratios) for ratio in ratios]\n\n self.loaders = loaders\n self.ratios = ratios\n\n def __next__(self):\n # random sample from each loader by ratio\n loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]\n return next(self.loaders[loader_idx])\n \n # def __len__(self):\n # return len(self.loaders)\n \n # def __iter__(self):\n # # for loader in self.loaders:\n # # yield loader\n # return self" }, { "identifier": "PrefetchLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class PrefetchLoader(object):\n \"\"\"\n Modified from https://github.com/ChenRocks/UNITER.\n\n overlap compute and cuda data transfer\n (copied and then modified from nvidia apex)\n \"\"\"\n\n def __init__(self, loader):\n self.loader = loader\n self.stream = torch.cuda.Stream()\n\n def __iter__(self):\n loader_it = iter(self.loader)\n self.preload(loader_it)\n batch = self.next(loader_it)\n while batch is not None:\n is_tuple = isinstance(batch, tuple)\n if is_tuple:\n task, batch = batch\n\n if is_tuple:\n yield task, batch\n else:\n yield batch\n batch = self.next(loader_it)\n\n def __len__(self):\n return len(self.loader)\n\n def preload(self, it):\n try:\n self.batch = next(it)\n except StopIteration:\n self.batch = None\n return\n # if record_stream() doesn't work, another option is to make sure\n # device inputs are created on the main stream.\n # self.next_input_gpu = torch.empty_like(self.next_input,\n # device='cuda')\n # self.next_target_gpu = torch.empty_like(self.next_target,\n # device='cuda')\n # Need to make sure the memory allocated for next_* is not still in use\n # by the main stream at the time we start copying to next_*:\n # self.stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(self.stream):\n self.batch = move_to_cuda(self.batch)\n # more code for the alternative if record_stream() doesn't work:\n # copy_ will record the use of the pinned source tensor in this\n # side stream.\n # self.next_input_gpu.copy_(self.next_input, non_blocking=True)\n # self.next_target_gpu.copy_(self.next_target, non_blocking=True)\n # self.next_input = self.next_input_gpu\n # self.next_target = self.next_target_gpu\n\n def next(self, it):\n torch.cuda.current_stream().wait_stream(self.stream)\n batch = self.batch\n if batch is not None:\n record_cuda_stream(batch)\n self.preload(it)\n return batch\n \n def __next__(self):\n pass\n\n def __getattr__(self, name):\n method = self.loader.__getattribute__(name)\n return method" }, { "identifier": "RunnerBase", "path": "minigpt4/runners/runner_base.py", "snippet": "class RunnerBase:\n \"\"\"\n A runner class to train and evaluate a model given a task and datasets.\n\n The runner uses pytorch distributed data parallel by default. 
Future release\n will support other distributed frameworks.\n \"\"\"\n\n def __init__(self, cfg, task, model, datasets, job_id):\n self.config = cfg\n self.job_id = job_id\n\n self.task = task\n self.datasets = datasets\n\n self._model = model\n\n self._wrapped_model = None\n self._device = None\n self._optimizer = None\n self._scaler = None\n self._dataloaders = None\n self._lr_sched = None\n\n self.start_epoch = 0\n\n # self.setup_seeds()\n self.setup_output_dir()\n\n @property\n def device(self):\n if self._device is None:\n self._device = torch.device(self.config.run_cfg.device)\n\n return self._device\n\n @property\n def use_distributed(self):\n return self.config.run_cfg.distributed\n\n @property\n def model(self):\n \"\"\"\n A property to get the DDP-wrapped model on the device.\n \"\"\"\n # move model to device\n if self._model.device != self.device:\n self._model = self._model.to(self.device)\n\n # distributed training wrapper\n if self.use_distributed:\n if self._wrapped_model is None:\n self._wrapped_model = DDP(\n self._model, device_ids=[self.config.run_cfg.gpu], find_unused_parameters=True\n )\n else:\n self._wrapped_model = self._model\n\n return self._wrapped_model\n\n @property\n def optimizer(self):\n # TODO make optimizer class and configurations\n if self._optimizer is None:\n num_parameters = 0\n p_wd, p_non_wd = [], []\n for n, p in self.model.named_parameters():\n if not p.requires_grad:\n continue # frozen weights\n print(n)\n if p.ndim < 2 or \"bias\" in n or \"ln\" in n or \"bn\" in n:\n p_non_wd.append(p)\n else:\n p_wd.append(p)\n num_parameters += p.data.nelement()\n logging.info(\"number of trainable parameters: %d\" % num_parameters)\n self._num_trainable_para = num_parameters > 0\n optim_params = [\n {\n \"params\": p_wd,\n \"weight_decay\": float(self.config.run_cfg.weight_decay),\n },\n {\"params\": p_non_wd, \"weight_decay\": 0},\n ]\n beta2 = self.config.run_cfg.get(\"beta2\", 0.999)\n self._optimizer = torch.optim.AdamW(\n optim_params,\n lr=float(self.config.run_cfg.init_lr),\n weight_decay=float(self.config.run_cfg.weight_decay),\n betas=(0.9, beta2),\n )\n\n return self._optimizer\n\n @property\n def scaler(self):\n amp = self.config.run_cfg.get(\"amp\", False)\n\n if amp:\n if self._scaler is None:\n self._scaler = torch.cuda.amp.GradScaler()\n\n return self._scaler\n\n @property\n def lr_scheduler(self):\n \"\"\"\n A property to get and create learning rate scheduler by split just in need.\n \"\"\"\n if self._lr_sched is None:\n lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)\n\n # max_epoch = self.config.run_cfg.max_epoch\n max_epoch = self.max_epoch\n # min_lr = self.config.run_cfg.min_lr\n min_lr = self.min_lr\n # init_lr = self.config.run_cfg.init_lr\n init_lr = self.init_lr\n\n # optional parameters\n decay_rate = self.config.run_cfg.get(\"lr_decay_rate\", None)\n warmup_start_lr = self.config.run_cfg.get(\"warmup_lr\", -1)\n warmup_steps = self.config.run_cfg.get(\"warmup_steps\", 0)\n iters_per_epoch = self.config.run_cfg.get(\"iters_per_epoch\", None)\n\n if iters_per_epoch is None:\n try:\n iters_per_epoch = len(self.dataloaders['train'])\n except (AttributeError, TypeError):\n iters_per_epoch = 10000\n\n self._lr_sched = lr_sched_cls(\n optimizer=self.optimizer,\n max_epoch=max_epoch,\n iters_per_epoch=iters_per_epoch,\n min_lr=min_lr,\n init_lr=init_lr,\n decay_rate=decay_rate,\n warmup_start_lr=warmup_start_lr,\n warmup_steps=warmup_steps,\n )\n\n return self._lr_sched\n\n @property\n def dataloaders(self) 
-> dict:\n \"\"\"\n A property to get and create dataloaders by split just in need.\n\n If no train_dataset_ratio is provided, concatenate map-style datasets and\n chain wds.DataPipe datasets separately. Training set becomes a tuple\n (ConcatDataset, ChainDataset), both are optional but at least one of them is\n required. The resultant ConcatDataset and ChainDataset will be sampled evenly.\n\n If train_dataset_ratio is provided, create a MultiIterLoader to sample\n each dataset by ratios during training.\n\n Currently do not support multiple datasets for validation and test.\n\n Returns:\n dict: {split_name: (tuples of) dataloader}\n \"\"\"\n if self._dataloaders is None:\n\n # concatenate map-style datasets and chain wds.DataPipe datasets separately\n # training set becomes a tuple (ConcatDataset, ChainDataset), both are\n # optional but at least one of them is required. The resultant ConcatDataset\n # and ChainDataset will be sampled evenly.\n logging.info(\n \"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).\"\n )\n\n datasets = reorg_datasets_by_split(self.datasets)\n self.datasets = datasets\n # self.datasets = concat_datasets(datasets)\n\n # print dataset statistics after concatenation/chaining\n for split_name in self.datasets:\n if isinstance(self.datasets[split_name], tuple) or isinstance(\n self.datasets[split_name], list\n ):\n # mixed wds.DataPipeline and torch.utils.data.Dataset\n num_records = sum(\n [\n len(d)\n if not type(d) in [wds.DataPipeline, ChainDataset]\n else 0\n for d in self.datasets[split_name]\n ]\n )\n\n else:\n if hasattr(self.datasets[split_name], \"__len__\"):\n # a single map-style dataset\n num_records = len(self.datasets[split_name])\n else:\n # a single wds.DataPipeline\n num_records = -1\n logging.info(\n \"Only a single wds.DataPipeline dataset, no __len__ attribute.\"\n )\n\n if num_records >= 0:\n logging.info(\n \"Loaded {} records for {} split from the dataset.\".format(\n num_records, split_name\n )\n )\n\n # create dataloaders\n split_names = sorted(self.datasets.keys())\n\n datasets = [self.datasets[split] for split in split_names]\n is_trains = [split in self.train_splits for split in split_names]\n\n batch_sizes = [\n self.config.run_cfg.batch_size_train\n if split == \"train\"\n else self.config.run_cfg.batch_size_eval\n for split in split_names\n ]\n\n collate_fns = []\n for dataset in datasets:\n if isinstance(dataset, tuple) or isinstance(dataset, list):\n collate_fns.append([getattr(d, \"collater\", None) for d in dataset])\n else:\n collate_fns.append(getattr(dataset, \"collater\", None))\n\n dataloaders = self.create_loaders(\n datasets=datasets,\n num_workers=self.config.run_cfg.num_workers,\n batch_sizes=batch_sizes,\n is_trains=is_trains,\n collate_fns=collate_fns,\n )\n\n self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}\n\n return self._dataloaders\n\n @property\n def cuda_enabled(self):\n return self.device.type == \"cuda\"\n\n @property\n def max_epoch(self):\n return int(self.config.run_cfg.max_epoch)\n\n @property\n def log_freq(self):\n log_freq = self.config.run_cfg.get(\"log_freq\", 50)\n return int(log_freq)\n\n @property\n def init_lr(self):\n return float(self.config.run_cfg.init_lr)\n\n @property\n def min_lr(self):\n return float(self.config.run_cfg.min_lr)\n\n @property\n def accum_grad_iters(self):\n return int(self.config.run_cfg.get(\"accum_grad_iters\", 1))\n\n @property\n def valid_splits(self):\n valid_splits = 
self.config.run_cfg.get(\"valid_splits\", [])\n\n if len(valid_splits) == 0:\n logging.info(\"No validation splits found.\")\n\n return valid_splits\n\n @property\n def test_splits(self):\n test_splits = self.config.run_cfg.get(\"test_splits\", [])\n\n return test_splits\n\n @property\n def train_splits(self):\n train_splits = self.config.run_cfg.get(\"train_splits\", [])\n\n if len(train_splits) == 0:\n logging.info(\"Empty train splits.\")\n\n return train_splits\n\n @property\n def evaluate_only(self):\n \"\"\"\n Set to True to skip training.\n \"\"\"\n return self.config.run_cfg.evaluate\n\n @property\n def use_dist_eval_sampler(self):\n return self.config.run_cfg.get(\"use_dist_eval_sampler\", True)\n\n @property\n def resume_ckpt_path(self):\n return self.config.run_cfg.get(\"resume_ckpt_path\", None)\n\n @property\n def train_loader(self):\n train_dataloader = self.dataloaders[\"train\"]\n\n return train_dataloader\n\n def setup_output_dir(self):\n lib_root = Path(registry.get_path(\"library_root\"))\n\n output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id\n result_dir = output_dir / \"result\"\n\n output_dir.mkdir(parents=True, exist_ok=True)\n result_dir.mkdir(parents=True, exist_ok=True)\n\n registry.register_path(\"result_dir\", str(result_dir))\n registry.register_path(\"output_dir\", str(output_dir))\n\n self.result_dir = result_dir\n self.output_dir = output_dir\n \n def model_to_betrained(self):\n if self.use_distributed:\n return self.model.module.to_be_trained()\n else:\n return self.model.to_be_trained()\n\n def train(self):\n start_time = time.time()\n best_agg_metric = -100000\n best_epoch = 0\n not_change = 0\n self.set_model_mode(self.config.run_cfg.mode)\n \n\n self.log_config()\n stop_training_flag = False\n # resume from checkpoint if specified\n if not self.evaluate_only and self.resume_ckpt_path is not None:\n self._load_checkpoint(self.resume_ckpt_path)\n\n if not self.evaluate_only:# with training\n for cur_epoch in range(self.start_epoch, self.max_epoch):\n # training phase\n if not self.evaluate_only and self.model_to_betrained():\n logging.info(\"Start training\")\n # having lora or IDs are used\n train_stats = self.train_epoch(cur_epoch)\n self.log_stats(split_name=\"train\", stats=train_stats)\n # torch.cuda.empty_cache()\n \n \n # evaluation phase\n if len(self.valid_splits) > 0:\n for split_name in self.valid_splits:\n logging.info(\"Evaluating on {}.\".format(split_name))\n\n val_log = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch\n )\n # torch.cuda.empty_cache()\n \n if val_log is not None:\n if is_main_process():\n assert (\n \"agg_metrics\" in val_log\n ), \"No agg_metrics found in validation log.\"\n\n agg_metrics = val_log[\"agg_metrics\"]\n if agg_metrics > best_agg_metric and split_name == \"valid\":\n best_epoch, best_agg_metric = cur_epoch, agg_metrics\n\n self._save_checkpoint(cur_epoch, is_best=True)\n not_change = 0\n \n \n # logging.info(\"Evaluating on {}.\".format('test'))\n # test_log = self.eval_epoch(split_name='test', cur_epoch='best', skip_reload=True)\n # logging.info(\"testing result:\", test_log)\n\n val_log.update({\"best_epoch\": best_epoch})\n self.log_stats(val_log, split_name)\n not_change += 1\n # if not_change > 20: # early stop\n # break\n # torch.cuda.empty_cache()\n\n else:\n # if no validation split is provided, we just save the checkpoint at the end of each epoch.\n if not self.evaluate_only:\n self._save_checkpoint(cur_epoch, is_best=False)\n\n if self.evaluate_only:\n break\n\n if 
self.config.run_cfg.distributed:\n dist.barrier()\n if not self.model_to_betrained():\n break\n if not_change > 20:\n logging.info(\"Early stop. The results has not changed up to 20 epochs.\")\n break\n\n # testing phase, would only run when evaluate_only==True\n if self.evaluate_only:\n print(\"training finish or just evaluation...\")\n logging.info(\"Evaluating on {}.\".format(self.test_splits[0]))\n test_epoch = \"best\" if len(self.valid_splits) > 0 else cur_epoch\n self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logging.info(\"Training time {}\".format(total_time_str))\n self.set_model_mode(None) # recover to the default model\n\n def evaluate(self, cur_epoch=\"best\", skip_reload=False):\n test_logs = dict()\n\n if len(self.test_splits) > 0:\n for split_name in self.test_splits:\n test_logs[split_name] = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload\n )\n\n return test_logs\n\n def train_epoch(self, epoch):\n # train\n self.model.train()\n\n return self.task.train_epoch(\n epoch=epoch,\n model=self.model,\n data_loader=self.train_loader,\n optimizer=self.optimizer,\n scaler=self.scaler,\n lr_scheduler=self.lr_scheduler,\n cuda_enabled=self.cuda_enabled,\n log_freq=self.log_freq,\n accum_grad_iters=self.accum_grad_iters,\n )\n\n @torch.no_grad()\n def eval_epoch(self, split_name, cur_epoch, skip_reload=False):\n \"\"\"\n Evaluate the model on a given split.\n\n Args:\n split_name (str): name of the split to evaluate on.\n cur_epoch (int): current epoch.\n skip_reload_best (bool): whether to skip reloading the best checkpoint.\n During training, we will reload the best checkpoint for validation.\n During testing, we will use provided weights and skip reloading the best checkpoint .\n \"\"\"\n data_loader = self.dataloaders.get(split_name, None)\n assert data_loader, \"data_loader for split {} is None.\".format(split_name)\n\n # TODO In validation, you need to compute loss as well as metrics\n # TODO consider moving to model.before_evaluation()\n model = self.unwrap_dist_model(self.model)\n if not skip_reload and cur_epoch == \"best\":\n model = self._reload_best_model(model)\n model.eval()\n\n self.task.before_evaluation(\n model=model,\n dataset=self.datasets[split_name],\n )\n results = self.task.evaluation(model, data_loader)\n\n if results is not None:\n return self.task.after_evaluation(\n val_result=results,\n split_name=split_name,\n epoch=cur_epoch,\n )\n\n def unwrap_dist_model(self, model):\n if self.use_distributed:\n return model.module\n else:\n return model\n \n def set_model_mode(self,mode):\n if self.use_distributed:\n self.model.module.set_mode(mode)\n else:\n self.model.set_mode(mode)\n\n def create_loaders(\n self,\n datasets,\n num_workers,\n batch_sizes,\n is_trains,\n collate_fns,\n dataset_ratios=None,\n ):\n \"\"\"\n Create dataloaders for training and validation.\n \"\"\"\n\n def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):\n # create a single dataloader for each split\n if isinstance(dataset, ChainDataset) or isinstance(\n dataset, wds.DataPipeline\n ):\n # wds.WebdDataset instance are chained together\n # webdataset.DataPipeline has its own sampler and collate_fn\n loader = iter(\n DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n )\n )\n else:\n # map-style dataset are concatenated together\n # setup distributed sampler\n if 
self.use_distributed:\n sampler = DistributedSampler(\n dataset,\n shuffle=is_train,\n num_replicas=get_world_size(),\n rank=get_rank(),\n )\n if not self.use_dist_eval_sampler:\n # e.g. retrieval evaluation\n sampler = sampler if is_train else None\n else:\n sampler = None\n\n loader = DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None and is_train,\n collate_fn=collate_fn,\n drop_last=True if is_train else False,\n )\n loader = PrefetchLoader(loader)\n\n if is_train:\n loader = IterLoader(loader, use_distributed=self.use_distributed)\n\n return loader\n\n loaders = []\n\n for dataset, bsz, is_train, collate_fn in zip(\n datasets, batch_sizes, is_trains, collate_fns\n ):\n if isinstance(dataset, list) or isinstance(dataset, tuple):\n if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None:\n dataset_ratios = [d.sample_ratio for d in dataset]\n loader = MultiIterLoader(\n loaders=[\n _create_loader(d, num_workers, bsz, is_train, collate_fn[i])\n for i, d in enumerate(dataset)\n ],\n ratios=dataset_ratios,\n )\n else:\n loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)\n\n loaders.append(loader)\n\n return loaders\n\n @main_process\n def _save_checkpoint(self, cur_epoch, is_best=False):\n \"\"\"\n Save the checkpoint at the current epoch.\n \"\"\"\n model_no_ddp = self.unwrap_dist_model(self.model)\n param_grad_dic = {\n k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()\n }\n state_dict = model_no_ddp.state_dict()\n for k in list(state_dict.keys()):\n if k in param_grad_dic.keys() and not param_grad_dic[k]:\n # delete parameters that do not require gradient\n del state_dict[k]\n save_obj = {\n \"model\": state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"config\": self.config.to_dict(),\n \"scaler\": self.scaler.state_dict() if self.scaler else None,\n \"epoch\": cur_epoch,\n }\n save_to = os.path.join(\n self.output_dir,\n \"checkpoint_{}.pth\".format(\"best\" if is_best else cur_epoch),\n )\n logging.info(\"Saving checkpoint at epoch {} to {}.\".format(cur_epoch, save_to))\n torch.save(save_obj, save_to)\n\n def _reload_best_model(self, model):\n \"\"\"\n Load the best checkpoint for evaluation.\n \"\"\"\n checkpoint_path = os.path.join(self.output_dir, \"checkpoint_best.pth\")\n\n logging.info(\"Loading checkpoint from {}.\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(checkpoint[\"model\"])\n except RuntimeError as e:\n logging.warning(\n \"\"\"\n Key mismatch when loading checkpoint. 
This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n \"\"\"\n )\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n return model\n\n def _load_checkpoint(self, url_or_filename):\n \"\"\"\n Resume from a checkpoint.\n \"\"\"\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=self.device)\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=self.device)\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False)\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if self.scaler and \"scaler\" in checkpoint:\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n logging.info(\"Resume checkpoint from {}\".format(url_or_filename))\n\n @main_process\n def log_stats(self, stats, split_name):\n if isinstance(stats, dict):\n log_stats = {**{f\"{split_name}_{k}\": v for k, v in stats.items()}}\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n elif isinstance(stats, list):\n pass\n\n @main_process\n def log_config(self):\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(self.config.to_dict(), indent=4) + \"\\n\")" } ]
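The `MultiIterLoader` snippet in the context above picks which dataloader supplies the next batch by sampling an index with probability proportional to a ratio list. Here is a minimal sketch of that sampling behaviour with plain Python iterators; the two dummy streams and the 3:1 ratio are illustrative assumptions, not values from this record:

import itertools
import random

class RatioMixer:
    # Draws from one of several (infinite) iterators, weighted by normalized ratios,
    # mirroring MultiIterLoader.__next__ quoted above.
    def __init__(self, loaders, ratios=None):
        if ratios is None:
            ratios = [1.0] * len(loaders)
        total = sum(ratios)
        self.loaders = loaders
        self.ratios = [float(r) / total for r in ratios]

    def __next__(self):
        idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]
        return next(self.loaders[idx])

# Dummy infinite streams standing in for per-dataset dataloaders.
stream_a = itertools.cycle(["movie_batch"])
stream_b = itertools.cycle(["book_batch"])
mixed = RatioMixer([stream_a, stream_b], ratios=[3, 1])
print([next(mixed) for _ in range(8)])  # roughly a 3:1 mix of the two streams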
import datetime
import json
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from pathlib import Path
from minigpt4.common.dist_utils import (
    download_cached_file,
    get_rank,
    get_world_size,
    is_main_process,
    main_process,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import is_url
from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset
from minigpt4.datasets.datasets.dataloader_utils import (
    IterLoader,
    MultiIterLoader,
    PrefetchLoader,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from minigpt4.runners.runner_base import RunnerBase
11,148
# sampler=sampler, # shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # @registry.register_runner("rec_runner_base") # class RecRunnerBase: # """ # A runner class to train and evaluate a model given a task and datasets. # The runner uses pytorch distributed data parallel by default. Future release # will support other distributed frameworks. # """ # def __init__(self, cfg, task, model, datasets, job_id): # self.config = cfg # self.job_id = job_id # self.task = task # self.datasets = datasets # self._model = model # self._wrapped_model = None # self._device = None # self._optimizer = None # self._scaler = None # self._dataloaders = None # self._lr_sched = None # self.start_epoch = 0 # # self.setup_seeds() # self.setup_output_dir() # @property # def device(self): # if self._device is None: # self._device = torch.device(self.config.run_cfg.device) # return self._device # @property # def use_distributed(self): # return self.config.run_cfg.distributed # @property # def model(self): # """ # A property to get the DDP-wrapped model on the device. # """ # # move model to device # if self._model.device != self.device: # self._model = self._model.to(self.device) # # distributed training wrapper # if self.use_distributed: # if self._wrapped_model is None: # self._wrapped_model = DDP( # self._model, device_ids=[self.config.run_cfg.gpu] # ) # else: # self._wrapped_model = self._model # return self._wrapped_model # @property # def optimizer(self): # # TODO make optimizer class and configurations # if self._optimizer is None: # num_parameters = 0 # p_wd, p_non_wd = [], [] # for n, p in self.model.named_parameters(): # if not p.requires_grad: # continue # frozen weights # print(n) # if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: # p_non_wd.append(p) # else: # p_wd.append(p) # num_parameters += p.data.nelement() # logging.info("number of trainable parameters: %d" % num_parameters) # optim_params = [ # { # "params": p_wd, # "weight_decay": float(self.config.run_cfg.weight_decay), # }, # {"params": p_non_wd, "weight_decay": 0}, # ] # beta2 = self.config.run_cfg.get("beta2", 0.999) # self._optimizer = torch.optim.AdamW( # optim_params, # lr=float(self.config.run_cfg.init_lr), # weight_decay=float(self.config.run_cfg.weight_decay), # betas=(0.9, beta2), # ) # return self._optimizer # @property # def scaler(self): # amp = self.config.run_cfg.get("amp", False) # if amp: # if self._scaler is None: # self._scaler = torch.cuda.amp.GradScaler() # return self._scaler # @property # def lr_scheduler(self): # """ # A property to get and create learning rate scheduler by split just in need. 
# """ # if self._lr_sched is None: # lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) # # max_epoch = self.config.run_cfg.max_epoch # max_epoch = self.max_epoch # # min_lr = self.config.run_cfg.min_lr # min_lr = self.min_lr # # init_lr = self.config.run_cfg.init_lr # init_lr = self.init_lr # # optional parameters # decay_rate = self.config.run_cfg.get("lr_decay_rate", None) # warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) # warmup_steps = self.config.run_cfg.get("warmup_steps", 0) # iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) # if iters_per_epoch is None: # try: # iters_per_epoch = len(self.dataloaders['train']) # except (AttributeError, TypeError): # iters_per_epoch = 10000 # self._lr_sched = lr_sched_cls( # optimizer=self.optimizer, # max_epoch=max_epoch, # iters_per_epoch=iters_per_epoch, # min_lr=min_lr, # init_lr=init_lr, # decay_rate=decay_rate, # warmup_start_lr=warmup_start_lr, # warmup_steps=warmup_steps, # ) # return self._lr_sched # @property # def dataloaders(self) -> dict: # """ # A property to get and create dataloaders by split just in need. # If no train_dataset_ratio is provided, concatenate map-style datasets and # chain wds.DataPipe datasets separately. Training set becomes a tuple # (ConcatDataset, ChainDataset), both are optional but at least one of them is # required. The resultant ConcatDataset and ChainDataset will be sampled evenly. # If train_dataset_ratio is provided, create a MultiIterLoader to sample # each dataset by ratios during training. # Currently do not support multiple datasets for validation and test. # Returns: # dict: {split_name: (tuples of) dataloader} # """ # if self._dataloaders is None: # # concatenate map-style datasets and chain wds.DataPipe datasets separately # # training set becomes a tuple (ConcatDataset, ChainDataset), both are # # optional but at least one of them is required. The resultant ConcatDataset # # and ChainDataset will be sampled evenly. # logging.info( # "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." # ) # datasets = reorg_datasets_by_split(self.datasets) # self.datasets = datasets # # self.datasets = concat_datasets(datasets) # # print dataset statistics after concatenation/chaining # for split_name in self.datasets: # if isinstance(self.datasets[split_name], tuple) or isinstance( # self.datasets[split_name], list # ): # # mixed wds.DataPipeline and torch.utils.data.Dataset # num_records = sum( # [ # len(d) # if not type(d) in [wds.DataPipeline, ChainDataset] # else 0 # for d in self.datasets[split_name] # ] # ) # else: # if hasattr(self.datasets[split_name], "__len__"): # # a single map-style dataset # num_records = len(self.datasets[split_name]) # else: # # a single wds.DataPipeline # num_records = -1 # logging.info( # "Only a single wds.DataPipeline dataset, no __len__ attribute." 
# ) # if num_records >= 0: # logging.info( # "Loaded {} records for {} split from the dataset.".format( # num_records, split_name # ) # ) # # create dataloaders # split_names = sorted(self.datasets.keys()) # datasets = [self.datasets[split] for split in split_names] # is_trains = [split in self.train_splits for split in split_names] # batch_sizes = [ # self.config.run_cfg.batch_size_train # if split == "train" # else self.config.run_cfg.batch_size_eval # for split in split_names # ] # collate_fns = [] # for dataset in datasets: # if isinstance(dataset, tuple) or isinstance(dataset, list): # collate_fns.append([getattr(d, "collater", None) for d in dataset]) # else: # collate_fns.append(getattr(dataset, "collater", None)) # dataloaders = self.create_loaders( # datasets=datasets, # num_workers=self.config.run_cfg.num_workers, # batch_sizes=batch_sizes, # is_trains=is_trains, # collate_fns=collate_fns, # ) # self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} # return self._dataloaders # @property # def cuda_enabled(self): # return self.device.type == "cuda" # @property # def max_epoch(self): # return int(self.config.run_cfg.max_epoch) # @property # def log_freq(self): # log_freq = self.config.run_cfg.get("log_freq", 50) # return int(log_freq) # @property # def init_lr(self): # return float(self.config.run_cfg.init_lr) # @property # def min_lr(self): # return float(self.config.run_cfg.min_lr) # @property # def accum_grad_iters(self): # return int(self.config.run_cfg.get("accum_grad_iters", 1)) # @property # def valid_splits(self): # valid_splits = self.config.run_cfg.get("valid_splits", []) # if len(valid_splits) == 0: # logging.info("No validation splits found.") # return valid_splits # @property # def test_splits(self): # test_splits = self.config.run_cfg.get("test_splits", []) # return test_splits # @property # def train_splits(self): # train_splits = self.config.run_cfg.get("train_splits", []) # if len(train_splits) == 0: # logging.info("Empty train splits.") # return train_splits # @property # def evaluate_only(self): # """ # Set to True to skip training. 
# """ # return self.config.run_cfg.evaluate # @property # def use_dist_eval_sampler(self): # return self.config.run_cfg.get("use_dist_eval_sampler", True) # @property # def resume_ckpt_path(self): # return self.config.run_cfg.get("resume_ckpt_path", None) # @property # def train_loader(self): # train_dataloader = self.dataloaders["train"] # return train_dataloader # def setup_output_dir(self): # lib_root = Path(registry.get_path("library_root")) # output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id # result_dir = output_dir / "result" # output_dir.mkdir(parents=True, exist_ok=True) # result_dir.mkdir(parents=True, exist_ok=True) # registry.register_path("result_dir", str(result_dir)) # registry.register_path("output_dir", str(output_dir)) # self.result_dir = result_dir # self.output_dir = output_dir # def train(self): # start_time = time.time() # best_agg_metric = 0 # best_epoch = 0 # self.log_config() # # resume from checkpoint if specified # if not self.evaluate_only and self.resume_ckpt_path is not None: # self._load_checkpoint(self.resume_ckpt_path) # for cur_epoch in range(self.start_epoch, self.max_epoch): # # training phase # if not self.evaluate_only: # logging.info("Start training") # train_stats = self.train_epoch(cur_epoch) # self.log_stats(split_name="train", stats=train_stats) # # evaluation phase # if len(self.valid_splits) > 0: # for split_name in self.valid_splits: # logging.info("Evaluating on {}.".format(split_name)) # val_log = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch # ) # if val_log is not None: # if is_main_process(): # assert ( # "agg_metrics" in val_log # ), "No agg_metrics found in validation log." # agg_metrics = val_log["agg_metrics"] # if agg_metrics > best_agg_metric and split_name == "val": # best_epoch, best_agg_metric = cur_epoch, agg_metrics # self._save_checkpoint(cur_epoch, is_best=True) # val_log.update({"best_epoch": best_epoch}) # self.log_stats(val_log, split_name) # else: # # if no validation split is provided, we just save the checkpoint at the end of each epoch. # if not self.evaluate_only: # self._save_checkpoint(cur_epoch, is_best=False) # if self.evaluate_only: # break # if self.config.run_cfg.distributed: # dist.barrier() # # testing phase # test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch # self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) # total_time = time.time() - start_time # total_time_str = str(datetime.timedelta(seconds=int(total_time))) # logging.info("Training time {}".format(total_time_str)) # def evaluate(self, cur_epoch="best", skip_reload=False): # test_logs = dict() # if len(self.test_splits) > 0: # for split_name in self.test_splits: # test_logs[split_name] = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload # ) # return test_logs # def train_epoch(self, epoch): # # train # self.model.train() # return self.task.train_epoch( # epoch=epoch, # model=self.model, # data_loader=self.train_loader, # optimizer=self.optimizer, # scaler=self.scaler, # lr_scheduler=self.lr_scheduler, # cuda_enabled=self.cuda_enabled, # log_freq=self.log_freq, # accum_grad_iters=self.accum_grad_iters, # ) # @torch.no_grad() # def eval_epoch(self, split_name, cur_epoch, skip_reload=False): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. 
# During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # self.model.eval() # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # if not skip_reload and cur_epoch == "best": # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # if results is not None: # return self.task.after_evaluation( # val_result=results, # split_name=split_name, # epoch=cur_epoch, # ) # @torch.no_grad() # def eval_epoch_new(self, split_name, cur_epoch): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. # During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # # if not skip_reload and cur_epoch == "best": # # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # return results # # if results is not None: # # return self.task.after_evaluation( # # val_result=results, # # split_name=split_name, # # epoch=cur_epoch, # # ) # def unwrap_dist_model(self, model): # if self.use_distributed: # return model.module # else: # return model # def create_loaders( # self, # datasets, # num_workers, # batch_sizes, # is_trains, # collate_fns, # dataset_ratios=None, # ): # """ # Create dataloaders for training and validation. # """ # def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): # # create a single dataloader for each split # if isinstance(dataset, ChainDataset) or isinstance( # dataset, wds.DataPipeline # ): # # wds.WebdDataset instance are chained together # # webdataset.DataPipeline has its own sampler and collate_fn # loader = iter( # DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # ) # ) # else: # # map-style dataset are concatenated together # # setup distributed sampler # if self.use_distributed: # sampler = DistributedSampler( # dataset, # shuffle=is_train, # num_replicas=get_world_size(), # rank=get_rank(), # ) # if not self.use_dist_eval_sampler: # # e.g. 
retrieval evaluation # sampler = sampler if is_train else None # else: # sampler = None # loader = DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # sampler=sampler, # shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
@registry.register_runner("rec_runner_base")
5
2023-10-29 12:47:25+00:00
16k
tobagin/whakarere
whakarere/windows/whakarere.py
[ { "identifier": "ConfigManager", "path": "whakarere/managers/config.py", "snippet": "class ConfigManager:\n def __init__(self, window):\n self.window = window\n self.config = {}\n self.config_file_path = os.path.expanduser(\"~/.config/whakarere/config.json\")\n atexit.register(self.save_config)\n\n def load_config(self):\n if os.path.exists(self.config_file_path):\n with open(self.config_file_path, \"r\") as f:\n self.config = json.load(f)\n\n def save_config(self):\n with open(self.config_file_path, \"w\") as f:\n json.dump(self.config, f)\n \n def set_config(self, key, value):\n self.config[key] = value\n\n def get_config(self, key):\n return self.config.get(key)" }, { "identifier": "SessionManager", "path": "whakarere/managers/session.py", "snippet": "class SessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.current_session_id = None\n self.session_ids = []\n\n def add_session(self, session_id):\n if session_id not in self.session_ids:\n if self.check_session_id(session_id):\n self.session_ids.append(session_id)\n self.save_session_ids()\n else:\n self.terminate_session(session_id)\n session_id = self.add_session(self.generate_session_id())\n self.check_session_id(session_id)\n self.session_ids.append(session_id)\n self.save_session_ids()\n\n def remove_session(self, session_id):\n if session_id in self.session_ids:\n self.session_ids.remove(session_id)\n self.save_session_ids()\n if not self.check_session_status(session_id):\n self.terminate_session(session_id)\n\n def get_session_ids_size(self):\n return len(self.session_ids)\n\n def generate_session_id(self):\n return str(uuid.uuid4())\n\n def get_session(self, session_id):\n return self.session_ids.get(session_id)\n \n def set_current_session(self, session_id):\n self.current_session_id = session_id\n \n def get_current_session(self):\n return self.current_session_id\n\n def clear_current_session(self):\n self.current_session_id = None\n\n def get_session_ids(self):\n return self.session_ids\n \n def load_sessions(self):\n self.session_ids = self.window.config_manager.get_config(\"session_ids\")\n if self.session_ids is None:\n self.session_ids = []\n \n def save_session_ids(self):\n self.window.config_manager.set_config(\"session_ids\", self.session_ids)\n self.window.config_manager.save_config()\n \n def get_current_session_user_id(self):\n return self.window.whatsapp_manager.get_user_id(self.current_session_id)\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result \n\n def check_session_id(self, session_id):\n url = self.api_url + f'/session/start/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_id: \" + str(result))\n \n return result \n\n def terminate_session(self, session_id):\n url = self.api_url + f'/session/terminate/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_session: \" + str(result))\n \n return result \n \n def terminate_inactive_sessions(self):\n url = self.api_url + f'/session/terminateInactive'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n 
if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result \n\n def terminate_all_sessions(self, test=False):\n url = self.api_url + f'/session/terminateAll'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result " }, { "identifier": "WhatsAppSessionManager", "path": "whakarere/managers/whatsapp.py", "snippet": "class WhatsAppSessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.whatsapp_messenger_pages = {}\n self.chats = {} # Changed to a dictionary to map session IDs to chats\n self.chats_avatar = {} # Presumably for future functionality\n self.databases = {} # Changed to a dictionary to map session IDs to databases\n self.chat_messages = {} # Presumably for future functionality\n self.number = 0\n\n def load_or_create_databases(self):\n db_directory = os.path.expanduser(\"~/.config/whakarere/dbs\")\n\n # Ensure the database directory exists\n if not os.path.exists(db_directory):\n os.makedirs(db_directory)\n\n for session_id in self.window.session_manager.session_ids:\n db_file = f\"{session_id}.db\"\n db_path = os.path.join(db_directory, db_file)\n\n # Connect to the SQLite database\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n\n # Store the connection in the dictionary\n self.databases[session_id] = conn\n\n # Close the cursor\n cursor.close()\n\n def initialize(self):\n sessions_thread = threading.Thread(target=self.initialize_sessions)\n sessions_thread.start()\n\n def initialize_sessions(self):\n for session in self.window.session_manager.session_ids:\n if self.window.session_manager.check_session_status(session):\n result = self.get_chats(session) # Fixed assignment\n self.chats[session] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session)\n self.window.whatsapp_manager.add_whatsapp_messenger_page(session)\n\n def initialize_session_by_id(self, session_id):\n if self.window.session_manager.check_session_status(session_id):\n result = self.get_chats(session_id) # Fixed assignment\n self.chats[session_id] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session_id)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session_id)\n if session_id not in self.whatsapp_sessions_pages:\n self.whatsapp_sessions_pages[session_id] = WhatsappMessengerPage(self, session_id)\n\n def navigate_to_whatsapp_messenger_page(self, session_id):\n # make it so it 
checks for for already open session on whatsapp_sessions_pages\n # if it has one and if doesn´t it creates a new one and pushes into the whatsapp_sessions_pages\n if session_id in self.whatsapp_sessions_pages:\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n else:\n self.add_whatsapp_messenger_page(session_id)\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n\n ############################\n # Chat methods\n ############################\n\n def get_chats(self, session_id):\n url = self.api_url + f'/client/getChats/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"chats\"]\n\n if(self.window.is_debug()):\n print(\"get_chats: \" + str(result))\n \n return result \n \n def chat_fetch_messages(self, chat_id, session_id):\n url = self.api_url + f'/chat/fetchMessages/{session_id}'\n result = requests.post(url, headers=self.headers, json={'chatId': chat_id})\n if(self.number == 3):\n print(result)\n\n json = result.json()\n\n if(self.window.is_debug()):\n print(\"get_chat_messages: \" + str(result))\n\n if(self.number == 3):\n print(json) \n self.number += 1\n \n return result \n\n def get_chats_by_id(self, session_id):\n return self.chats.get(session_id, [])\n\n def get_chat_avatar(self, chat_id):\n url = self.chats_avatar.get(chat_id, None)\n if url is not None:\n response = requests.get(url)\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n return Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n else:\n binary_data = base64.b64decode(UnknownContact.base64image)\n gbytes = GLib.Bytes.new(binary_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n \n def get_user_profile_picture(self, userid, session_id):\n url = self.api_url + f'/client/getProfilePicUrl/{session_id}'\n try:\n result = requests.post(url, headers=self.headers, json={'contactId': userid}).json()[\"result\"]\n except:\n result = None\n\n if(self.window.is_debug()):\n print(\"get_user_profile_picture: \" + str(result))\n \n return result \n\n def get_user_id(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"wid\"][\"_serialized\"] # Extract userid\n\n if(self.window.is_debug()):\n print(\"get_user_id: \" + str(result))\n \n return result \n\n def get_user_name(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"pushname\"] # Return pushname\n\n if(self.window.is_debug()):\n print(\"get_user_name: \" + str(result))\n \n return result \n\n ############################\n # Contact methods\n ############################\n\n def get_contact_info(self, contact_id, session_id):\n url = self.api_url + f'/contact/getClassInfo/{session_id}'\n result = requests.post(url, headers=self.headers, json={'contactId': contact_id}).json()\n print(result)\n if(self.window.is_debug()):\n print(\"get_contact_info: \" + str(result))\n \n return result" }, { "identifier": "WindowTitlebarWidget", "path": "whakarere/widgets/titlebar.py", "snippet": "class WindowTitlebarWidget(Gtk.Box):\n def __init__(self):\n super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=2)\n self.label_title = Gtk.Label(label=\"Whakarere\")\n self.label_title.add_css_class(\"title\")\n self.label_subtitle = 
Gtk.Label(label=\"Available Sessions\")\n self.label_subtitle.add_css_class(\"subtitle\")\n self.append(self.label_title)\n self.append(self.label_subtitle)\n\n def set_title(self, title):\n self.label_title.set_label(title)\n\n def set_subtitle(self, subtitle):\n self.label_subtitle.set_label(subtitle)" }, { "identifier": "MainMenuButtonWidget", "path": "whakarere/widgets/main_menu.py", "snippet": "class MainMenuButtonWidget(Gtk.MenuButton):\n def __init__(self):\n super().__init__()\n # Create MainMenu Button Widget\n self.set_icon_name(\"open-menu-symbolic\")\n self.set_tooltip_text(\"Main Menu\")\n self.set_has_frame(False)\n self.set_direction(Gtk.ArrowType.DOWN)\n self.set_popover(Gtk.Popover())\n self.get_popover().set_position(Gtk.PositionType.BOTTOM)\n self.get_popover().set_has_arrow(True)\n self.get_popover().set_size_request(200, 200)\n self.get_popover().set_child(Gtk.Label(label=\"Main Menu\"))\n \n # About Button\n about_button = Gtk.Button()\n about_button.set_label(\"About Whakarere\")\n about_button.set_has_frame(False)\n about_button.connect(\"clicked\", self.on_about_clicked)\n \n # Keyboard Shortcuts Button\n shortcut_button = Gtk.Button()\n shortcut_button.set_label(\"Keyboard Shortcuts\")\n shortcut_button.set_has_frame(False)\n shortcut_button.connect(\"clicked\", self.on_shortcuts_clicked)\n \n # Preferences Button\n preferences_button = Gtk.Button()\n preferences_button.set_label(\"Preferences\")\n preferences_button.set_has_frame(False)\n preferences_button.connect(\"clicked\", self.on_preferences_clicked)\n\n settings_menu = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n separetor = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)\n settings_menu.append(separetor)\n settings_menu.append(preferences_button)\n settings_menu.append(shortcut_button)\n settings_menu.append(about_button)\n\n self.get_popover().set_child(settings_menu)\n\n def on_about_clicked(self, button):\n about_window = Adw.AboutWindow(modal=True, transient_for=self)\n about_window.set_application_icon(\"com.mudeprolinux.whakarere\")\n about_window.set_application_name(\"Whakarere\")\n about_window.set_version(\"0.1.0\")\n #about_window.set_comments(\"A Gtk4 Whatsapp Client.\")\n about_window.set_website(\"https://mudeprolinux.com\")\n about_window.set_developer_name(\"Mude Pro Linux\")\n about_window.set_developers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_designers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_license_type(Gtk.License.MIT_X11)\n about_window.set_copyright(\"2023 © Mude Pro Linux\")\n about_window.set_issue_url(\"https://github.com/tobagin/whakarere/issues\")\n\n # Show the About window\n about_window.present()\n \n def on_shortcuts_clicked(self, button):\n shortcuts_window = Gtk.ShortcutsWindow(modal=True, transient_for=self)\n shortcuts_section = Gtk.ShortcutsSection()\n shortcuts_group = Gtk.ShortcutsGroup()\n shortcuts_section.add_group(shortcuts_group)\n shortcuts_window.add_session(shortcuts_section)\n copy_shortcut = Gtk.Shortcut.new_from_string(\"<Ctrl>C\", Gtk.Label.new(\"Copy Selected Text\"))\n shortcuts_group.add(copy_shortcut)\n shortcuts_window.show()\n\n def on_preferences_clicked(self, button):\n pass" }, { "identifier": "SessionManagerPage", "path": "whakarere/pages/session.py", "snippet": "class SessionManagerPage(Adw.NavigationPage):\n def __init__(self, app_manager):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.app_manager = app_manager\n self.set_can_pop(True)\n\n # Create TitleBar Widget\n 
self.window_titlebar_widget = WindowTitlebarWidget()\n\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n\n if self.app_manager.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.app_manager.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n 
self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n self.page_content.append(content_box)\n self.page_content.append(bottom_bar)\n\n # Set page content\n self.set_child(self.page_content)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n 
self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n session_id = self.app_manager.session_manager.generate_session_id()\n self.app_manager.session_manager.add_session(session_id)\n self.account_list.append(AccountItem(session_id))\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.app_manager.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.app_manager.session_manager.remove_session(self.selected_item.session_id)\n self.app_manager.whatsapp_manager.terminate_session(self.selected_item.session_id)\n self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.app_manager.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.app_manager.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.app_manager.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = 
Gtk.Label(label=f\"<b>{self.app_manager.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "SessionManagerPage2", "path": "whakarere/pages/session2.py", "snippet": "class SessionManagerPage2(Adw.NavigationPage):\n def __init__(self, window):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.window = window\n self.set_can_pop(True)\n self.session_overlay = Gtk.Overlay()\n\n # Create TitleBar Widget\n self.window_titlebar_widget = WindowTitlebarWidget()\n self.window_titlebar_widget.set_title(\"Whakarere\")\n self.window_titlebar_widget.set_subtitle(\"A Gtk4 Whatsapp Client.\")\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n self.add_session_button = Gtk.Button()\n self.add_session_button.set_icon_name(\"window-new-symbolic\")\n self.add_session_button.set_tooltip_text(\"Create a New Session\")\n self.add_session_button.connect(\"clicked\", self.add_new_session)\n self.page_headerbar.pack_end(self.add_session_button)\n\n if self.window.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.window.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n 
self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n 
self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n #self.page_content.append(content_box)\n #self.page_content.append(bottom_bar)\n self.session_overlay.set_child(self.page_content)\n # Set page content\n self.set_child(self.session_overlay)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n #self.window.main_window.set_sensitive(False) # Disable main window \n new_account_wizard = AccountWizardWindow(self.app_manager)\n new_account_wizard.set_visible(True)\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.window.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.window.session_manager.remove_session(self.selected_item.session_id)\n self.window.whatsapp_manager.terminate_session(self.selected_item.session_id)\n 
self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.window.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.window.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.window.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = Gtk.Label(label=f\"<b>{self.window.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "AccountWizardWindow", "path": "whakarere/windows/account_wizard.py", "snippet": "class AccountWizardWindow(Adw.Window):\n def __init__(self, window):\n super().__init__()\n self.window = window\n self.set_transient_for(window)\n self.set_modal(True)\n self.set_default_size(300, 300)\n self.connect(\"close-request\", self.on_modal_close_request)\n self.set_decorated(False)\n self.session_id = None\n\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n\n self.header_bar = Adw.HeaderBar()\n self.titlebar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.title = Gtk.Label(label=\"Creating a Session\")\n self.subtitle = Gtk.Label(label=\"Please wait...\")\n self.titlebar_box.append(self.title)\n self.titlebar_box.append(self.subtitle)\n self.header_bar.set_title_widget(self.titlebar_box)\n\n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n\n image = 
Gtk.Image.new_from_icon_name(\"com.mudeprolinux.whakarere\")\n image.set_pixel_size(120)\n label_title = Gtk.Label(label=\"Welcome to Whakarere\")\n label_title.set_halign(Gtk.Align.CENTER)\n label_message = Gtk.Label(label=\"Let me create a new session and I'll help you link it to your WhatsApp account.\")\n label_message.set_halign(Gtk.Align.CENTER)\n\n self.progress_bar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar = Gtk.ProgressBar()\n self.progress_bar.set_fraction(0.0)\n self.progress_bar.set_show_text(False)\n self.progress_bar.set_pulse_step(1)\n\n self.label_progress = Gtk.Label(label=\"Creating session...\")\n self.label_progress.set_halign(Gtk.Align.CENTER)\n self.label_progress.set_margin_top(0)\n self.progress_bar_box.append(self.progress_bar)\n self.progress_bar_box.append(self.label_progress)\n self.progress_bar_box.set_margin_top(40)\n self.progress_bar_box.set_margin_bottom(40)\n self.progress_bar_box.set_margin_start(20)\n self.progress_bar_box.set_margin_end(20)\n\n self.session_id = self.window.session_manager.generate_session_id()\n self.window.session_manager.add_session(self.session_id)\n thread = threading.Thread(target=self.update_progress_bar)\n thread.start()\n\n self.top_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.top_box.set_margin_top(20)\n self.top_box.set_halign(Gtk.Align.CENTER)\n self.top_box.set_valign(Gtk.Align.CENTER)\n self.top_box.append(image)\n self.top_box.append(label_title)\n self.top_box.append(label_message)\n self.top_box.set_margin_start(20)\n self.top_box.set_margin_end(20)\n \n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n self.window_content.append(self.header_bar)\n self.window_content.append(self.top_box)\n self.window_content.append(self.progress_bar_box)\n self.set_content(self.window_content)\n self.present()\n\n def on_modal_close_request(self, widget):\n self.window.session_manager.remove_session(self.session_id)\n self.destroy()\n\n def update_progress_bar(self):\n self.label_progress.set_text(\"Creating session...\")\n for i in range(1, 11):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Launching session...\")\n for i in range(11, 21):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Waiting for session activation...\")\n for i in range(21, 31):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Capturing QR code...\")\n for i in range(31, 41):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Generating QR code...\")\n for i in range(41, 51):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n \n self.progress_bar.pulse()\n self.label_progress.set_text(\"Please scan QR code to continue...\")\n self.progress_bar.pulse()\n self.qr_code_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar.pulse()\n qr_code_data = self.get_qr_code_image(self.session_id)\n self.progress_bar.pulse()\n glib_bytes = GLib.Bytes.new(qr_code_data)\n self.progress_bar.pulse()\n input_stream = Gio.MemoryInputStream.new_from_bytes(glib_bytes)\n self.progress_bar.pulse()\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n self.progress_bar.pulse()\n self.qr_code_image = Gtk.Image.new_from_pixbuf(pixbuf)\n self.progress_bar.pulse()\n self.qr_code_image.set_pixel_size(240)\n 
self.progress_bar.pulse()\n self.qr_code_box.append(self.qr_code_image)\n self.progress_bar.pulse()\n self.window_content.remove(self.top_box)\n self.progress_bar.pulse()\n self.window_content.insert_child_after(self.qr_code_box, self.header_bar)\n self.progress_bar.pulse()\n\n while not self.check_session_status(self.qr_code_image):\n self.progress_bar.pulse()\n time.sleep(1)\n\n self.progress_bar.set_fraction(0.50)\n\n self.label_progress.set_text(\"Syncing your chats...\")\n self.window.whatsapp_manager.initialize_session_by_id(self.session_id)\n for i in range(51, 71):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.4)\n\n self.label_progress.set_text(\"Done!\")\n\n def generate_qr_code(self, qr_code_data):\n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n qr.add_data(qr_code_data)\n qr.make(fit=True)\n return qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n def get_qr_code_texture(self, qr_code_data):\n qr_image = self.generate_qr_code(qr_code_data)\n pixbuf = self.pil_image_to_pixbuf(qr_image)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n\n def pil_image_to_pixbuf(self, pil_image):\n \"\"\"Convert a PIL image to a GdkPixbuf.\"\"\"\n buffer = BytesIO()\n pil_image.save(buffer)\n glib_bytes = GLib.Bytes.new(buffer.getvalue())\n loader = GdkPixbuf.PixbufLoader.new_with_type(\"png\")\n loader.write_bytes(glib_bytes)\n pixbuf = loader.get_pixbuf()\n loader.close()\n return pixbuf\n\n def get_qr_code_image(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}/image'\n result = requests.get(url, headers=self.headers).content\n\n if(self.window.is_debug()):\n print(\"get_qr_code_image: \" + str(result))\n \n return result\n\n def get_qr_code_data(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}'\n result = ((requests.get(url, headers=self.headers)).json())[\"qr\"]\n\n if(self.window.is_debug()):\n print(\"get_qr_code_data: \" + str(result))\n \n return result\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result" } ]
import gi from whakarere.managers.config import ConfigManager from whakarere.managers.session import SessionManager from whakarere.managers.whatsapp import WhatsAppSessionManager from whakarere.widgets.titlebar import WindowTitlebarWidget from whakarere.widgets.main_menu import MainMenuButtonWidget from whakarere.pages.session import SessionManagerPage from whakarere.pages.session2 import SessionManagerPage2 from whakarere.windows.account_wizard import AccountWizardWindow from gi.repository import Adw, Gtk, Gdk
11,819
gi.require_version('Gtk', '4.0') gi.require_version('Adw', '1') gi.require_version("Gdk", "4.0") class WhakarereMainWindow(Adw.ApplicationWindow): def __init__(self, app, debug=False, dev=False): super().__init__(application=app) self.app = app self.debug = debug self.dev = dev self.settings = Gtk.Settings.get_default() self.settings.connect("notify::gtk-theme-name", self.on_theme_changed) # Initial CSS application self.update_css_for_theme() # Set the window size and default close behavior self.set_default_size(800, 600) self.set_hide_on_close(True) # Create the config manager and load the config file self.config_manager = ConfigManager(self) self.config_manager.load_config() # Create the session manager and load the sessions self.session_manager = SessionManager(self) self.session_manager.load_sessions() # Create the whatsapp manager and initialize the active sessions self.whatsapp_manager = WhatsAppSessionManager(self) self.whatsapp_manager.initialize() # Create TitleBar Widget self.window_titlebar_widget = Adw.WindowTitle() self.window_titlebar_widget.set_title("Whakarere") self.window_titlebar_widget.set_subtitle("Your Gtk4 Whatsapp Client.") # Create MainMenu Button Widget
gi.require_version('Gtk', '4.0') gi.require_version('Adw', '1') gi.require_version("Gdk", "4.0") class WhakarereMainWindow(Adw.ApplicationWindow): def __init__(self, app, debug=False, dev=False): super().__init__(application=app) self.app = app self.debug = debug self.dev = dev self.settings = Gtk.Settings.get_default() self.settings.connect("notify::gtk-theme-name", self.on_theme_changed) # Initial CSS application self.update_css_for_theme() # Set the window size and default close behavior self.set_default_size(800, 600) self.set_hide_on_close(True) # Create the config manager and load the config file self.config_manager = ConfigManager(self) self.config_manager.load_config() # Create the session manager and load the sessions self.session_manager = SessionManager(self) self.session_manager.load_sessions() # Create the whatsapp manager and initialize the active sessions self.whatsapp_manager = WhatsAppSessionManager(self) self.whatsapp_manager.initialize() # Create TitleBar Widget self.window_titlebar_widget = Adw.WindowTitle() self.window_titlebar_widget.set_title("Whakarere") self.window_titlebar_widget.set_subtitle("Your Gtk4 Whatsapp Client.") # Create MainMenu Button Widget
self.button_settings_menu = MainMenuButtonWidget()
4
2023-10-29 15:46:50+00:00
16k
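The whakarere row above embeds a `pil_image_to_pixbuf` helper that round-trips a PIL image through a `GdkPixbuf.PixbufLoader`. The standalone sketch below shows the same conversion, passing an explicit `format="PNG"` (PIL needs the format spelled out when saving to an in-memory buffer) and closing the loader before reading the pixbuf. It assumes Pillow and PyGObject are installed; the 64x64 test image is just a placeholder.

from io import BytesIO

import gi
gi.require_version("GdkPixbuf", "2.0")
from gi.repository import GdkPixbuf, GLib
from PIL import Image


def pil_image_to_pixbuf(pil_image: Image.Image) -> GdkPixbuf.Pixbuf:
    # Serialize the PIL image to PNG bytes, then let PixbufLoader decode them.
    buffer = BytesIO()
    pil_image.save(buffer, format="PNG")  # explicit format: no filename to infer it from
    loader = GdkPixbuf.PixbufLoader.new_with_type("png")
    loader.write_bytes(GLib.Bytes.new(buffer.getvalue()))
    loader.close()  # finish decoding before asking for the pixbuf
    return loader.get_pixbuf()


if __name__ == "__main__":
    pixbuf = pil_image_to_pixbuf(Image.new("RGB", (64, 64), "white"))
    print(pixbuf.get_width(), pixbuf.get_height())  # 64 64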
KHU-VLL/CAST
dataset/datasets.py
[ { "identifier": "TubeMaskingGenerator", "path": "util_tools/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n self.total_patches = self.frames * self.num_patches_per_frame \n self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)\n self.total_masks = self.frames * self.num_masks_per_frame\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.total_patches, self.total_masks\n )\n return repr_str\n\n def __call__(self):\n mask_per_frame = np.hstack([\n np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),\n np.ones(self.num_masks_per_frame),\n ])\n np.random.shuffle(mask_per_frame)\n mask = np.tile(mask_per_frame, (self.frames,1)).flatten()\n return mask " }, { "identifier": "VideoClsDataset", "path": "dataset/kinetics.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,args=None):\n self.anno_path = anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,'train',sample)# 
self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,'val',sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n sample = os.path.join(self.data_path,'val',sample)# self.data_path + '/videos_train/' + sample\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \\\n / (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start:temporal_start + self.clip_len, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start:temporal_start + self.clip_len, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C 
T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoMAE", "path": "dataset/kinetics.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own video classification dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are three items in each line: (1) video path; (2) video length and (3) video label.\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default None.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 
'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n video_loader : bool, default False.\n Whether to use video loader to load data.\n use_decord : bool, default True.\n Whether to use Decord video loader to load data. Otherwise use mmcv video loader.\n transform : function, default None.\n A function that takes data and label and transforms them.\n data_aug : str, default 'v1'.\n Different types of data augmentation auto. Supports v1, v2, v3 and v4.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n \"\"\"\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_%05d.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n video_loader=False,\n use_decord=False,\n lazy_init=False):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_loader = video_loader\n self.video_ext = video_ext\n self.use_decord = use_decord\n self.transform = transform\n self.lazy_init = lazy_init\n\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n\n directory, target = self.clips[index]\n if self.video_loader:\n if '.' 
in directory.split('/')[-1]:\n # data in the \"setting\" file already have extension, e.g., demo.mp4\n video_name = directory\n else:\n # data in the \"setting\" file do not have extension, e.g., demo\n # So we need to provide extension (i.e., .mp4) to complete the file name.\n video_name = '{}.{}'.format(directory, self.video_ext)\n\n decord_vr = decord.VideoReader(video_name, num_threads=1)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(duration)\n\n images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)\n\n process_data, mask = self.transform((images, None)) # T*C,H,W\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W\n \n return (process_data, mask)\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, directory, setting):\n if not os.path.exists(setting):\n raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \" % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, video_duration, video_label\n if len(line_info) < 2:\n raise(RuntimeError('Video input format is not correct, missing one or more element. %s' % line))\n clip_path = os.path.join(line_info[0])\n target = int(line_info[1])\n item = (clip_path, target)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)),\n average_duration)\n offsets = offsets + np.random.randint(average_duration,\n size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(np.random.randint(\n num_frames - self.skip_length + 1,\n size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n\n def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):\n sampled_list = []\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n try:\n video_data = video_reader.get_batch(frame_id_list).asnumpy()\n sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]\n except:\n raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))\n return sampled_list" }, { "identifier": "SSVideoClsDataset", "path": "dataset/ssv2.py", "snippet": "class SSVideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path 
= anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n \n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], 
sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop)\n else:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n \n if self.mode == 'test':\n all_index = []\n tick = len(vr) / float(self.num_segment)\n all_index = 
list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = list(np.sort(np.array(all_index))) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n all_index = []\n if average_duration > 0:\n all_index += list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index += list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index += list(np.zeros((self.num_segment,)))\n all_index = list(np.array(all_index)) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "EpicVideoClsDataset", "path": "dataset/epic.py", "snippet": "class EpicVideoClsDataset(Dataset):\n \n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path = anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n \n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=',')\n self.dataset_samples = list(cleaned.values[:, 0])\n verb_label_array = list(cleaned.values[:, 1]) # verb\n noun_label_array = list(cleaned.values[:, 2]) # noun\n self.label_array = np.stack((noun_label_array, verb_label_array), axis=1) # label [noun, verb] sequence\n \n if (mode == 'train'):\n pass\n \n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif (mode == 'test'):\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n 
\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n \n sample = self.dataset_samples[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n \n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n \n elif self.mode == 'validation':\n sample = self.dataset_samples[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n \n elif self.mode == 'test':\n sample = self.test_dataset[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop)\n else:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n \n \n\n def _aug_frame(self,buffer,args):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 
0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n \n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n \n if self.mode == 'test':\n all_index = []\n tick = len(vr) / float(self.num_segment)\n all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = list(np.sort(np.array(all_index))) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n all_index = []\n if average_duration > 0:\n all_index += list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index += list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index += list(np.zeros((self.num_segment,)))\n all_index = list(np.array(all_index)) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" } ]
import os from torchvision import transforms from util_tools.transforms import * from util_tools.masking_generator import TubeMaskingGenerator from .kinetics import VideoClsDataset, VideoMAE from .ssv2 import SSVideoClsDataset from .epic import EpicVideoClsDataset
11,175
self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) self.transform = transforms.Compose([ self.train_augmentation, Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) def __call__(self, images): process_data, _ = self.transform(images) return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args) dataset = VideoMAE( root=None, setting=args.data_path, video_ext='mp4', is_color=True, modality='rgb', new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, video_loader=True, use_decord=True, lazy_init=False) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if args.data_set == 'Kinetics-400': mode = None anno_path = args.anno_path if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = VideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 400 elif args.data_set == 'SSV2': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = SSVideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=1, num_segment=args.num_frames, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 174 elif args.data_set == 'EPIC': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv')
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) self.transform = transforms.Compose([ self.train_augmentation, Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) def __call__(self, images): process_data, _ = self.transform(images) return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args) dataset = VideoMAE( root=None, setting=args.data_path, video_ext='mp4', is_color=True, modality='rgb', new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, video_loader=True, use_decord=True, lazy_init=False) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if args.data_set == 'Kinetics-400': mode = None anno_path = args.anno_path if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = VideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 400 elif args.data_set == 'SSV2': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = SSVideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=1, num_segment=args.num_frames, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 174 elif args.data_set == 'EPIC': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv')
dataset = EpicVideoClsDataset(
4
2023-10-25 07:07:05+00:00
16k
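The context of the CAST row above contains the full `TubeMaskingGenerator` consumed by `DataAugmentationForVideoMAE`. As a quick illustration of how that generator behaves, the sketch below re-uses the class essentially verbatim (only `__repr__` dropped) and calls it with an assumed window size of (8, 14, 14), e.g. 16 frames with tubelet size 2 on a 14x14 patch grid; the actual value comes from `args.window_size`, which this row does not show.

import numpy as np


class TubeMaskingGenerator:
    # Copied from the row's context: the same patch positions are masked in every frame ("tube" masking).
    def __init__(self, input_size, mask_ratio):
        self.frames, self.height, self.width = input_size
        self.num_patches_per_frame = self.height * self.width
        self.total_patches = self.frames * self.num_patches_per_frame
        self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)
        self.total_masks = self.frames * self.num_masks_per_frame

    def __call__(self):
        mask_per_frame = np.hstack([
            np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),
            np.ones(self.num_masks_per_frame),
        ])
        np.random.shuffle(mask_per_frame)
        return np.tile(mask_per_frame, (self.frames, 1)).flatten()


if __name__ == "__main__":
    gen = TubeMaskingGenerator(input_size=(8, 14, 14), mask_ratio=0.9)
    mask = gen()
    print(mask.shape)             # (1568,) == 8 * 14 * 14
    print(round(mask.mean(), 3))  # ~0.898: int(0.9 * 196) = 176 masked patches per frame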
OpenProteinAI/PoET
scripts/score.py
[ { "identifier": "Uniprot21", "path": "poet/alphabets.py", "snippet": "class Uniprot21(Alphabet):\n def __init__(\n self,\n mask=False,\n include_gap=False,\n include_startstop=False,\n distinct_startstop=False,\n ):\n chars = b\"ARNDCQEGHILKMFPSTWYV\"\n gap_token = start_token = stop_token = -1\n if include_gap:\n chars = chars + b\"-\"\n gap_token = len(chars) - 1\n if include_startstop:\n chars = chars + b\"*\"\n start_token = stop_token = len(chars) - 1\n if distinct_startstop:\n chars = chars + b\"$\"\n stop_token = len(chars) - 1\n # add the synonym tokens\n mask_token = len(chars)\n chars = chars + b\"XOUBZ\"\n\n encoding = np.arange(len(chars))\n encoding[mask_token + 1 :] = [\n 11,\n 4,\n mask_token,\n mask_token,\n ] # encode 'OUBZ' as synonyms\n missing = mask_token\n\n super(Uniprot21, self).__init__(\n chars, encoding=encoding, mask=mask, missing=missing\n )\n\n self.gap_token = gap_token\n self.start_token = start_token\n self.stop_token = stop_token\n self.mask_token = mask_token" }, { "identifier": "parse_stream", "path": "poet/fasta.py", "snippet": "def parse_stream(f, comment=b\"#\", upper=True):\n name = None\n sequence = []\n for line in f:\n if line.startswith(comment):\n continue\n line = line.strip()\n if line.startswith(b\">\"):\n if name is not None:\n yield name, b\"\".join(sequence)\n name = line[1:]\n sequence = []\n else:\n if upper:\n sequence.append(line.upper())\n else:\n sequence.append(line)\n if name is not None:\n yield name, b\"\".join(sequence)" }, { "identifier": "PackedTensorSequences", "path": "poet/models/modules/packed_sequence.py", "snippet": "class PackedTensorSequences:\n def __init__(\n self,\n packed_tensor: torch.Tensor,\n positions: torch.Tensor,\n indices: Optional[torch.Tensor],\n cu_seqlens: torch.Tensor,\n cu_seqlens_cpu: torch.Tensor,\n max_s: Union[torch.Tensor, int],\n batch_size: Optional[int],\n to_paddedable: bool = True,\n ):\n \"\"\"\n If to_paddedable, indicies and batch_size must be set to values that allow this\n object to be correctly padded.\n \"\"\"\n if to_paddedable:\n assert batch_size is not None\n\n self.x = packed_tensor\n self.positions = positions\n self.indices = indices\n self.cu_seqlens = cu_seqlens\n self.cu_seqlens_cpu = cu_seqlens_cpu\n self.max_s = max_s\n self.batch_size = batch_size\n self.to_paddedable = to_paddedable\n\n @property\n def dtype(self):\n return self.x.dtype\n\n @property\n def is_cuda(self):\n return self.x.is_cuda\n\n @property\n def device(self):\n return self.x.device\n\n @staticmethod\n def pack_input(x: torch.Tensor, positions=None, key_padding_mask=None):\n b = x.size(0)\n s = x.size(1)\n if positions is None:\n positions = (\n torch.arange(s, dtype=torch.long, device=x.device)\n .unsqueeze(0)\n .expand(b, s)\n )\n if key_padding_mask is None:\n x_packed = x.reshape(b * s, -1)\n positions = positions.reshape(b * s)\n indices = None\n cu_seqlens = torch.arange(\n 0, (b + 1) * s, step=s, dtype=torch.int32, device=x.device\n )\n cu_seqlens_cpu = torch.arange(\n 0,\n (b + 1) * s,\n step=s,\n dtype=torch.int32,\n )\n max_s = s\n else:\n # flash attention padding function expects 1 for valid and 0 for invalid positions...\n key_padding_mask_bool = ~(key_padding_mask.bool())\n x_packed, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask_bool)\n cu_seqlens_cpu = cu_seqlens.cpu()\n positions, _, _, _ = unpad_input(\n positions.unsqueeze(2), key_padding_mask_bool\n )\n positions = positions.squeeze(1)\n return PackedTensorSequences(\n x_packed, positions, indices, cu_seqlens, 
cu_seqlens_cpu, max_s, b\n )\n\n def to_padded(self, return_mask=False, return_positions=False):\n if not self.to_paddedable:\n raise ValueError(\"Cannot be to_padded\")\n\n s = self.max_s\n b = self.batch_size\n mask = None\n x = self.x\n pos = self.positions\n if self.indices is None:\n # we are just a flattened matrix...\n x = x.view(b, s, *x.shape[1:])\n pos = pos.view(b, s)\n else:\n dims = None\n if x.ndim > 2:\n dims = x.shape[1:]\n x = x.view(x.size(0), -1)\n x, mask = pad_input(x, self.indices, b, s, return_mask=return_mask)\n pos, _ = pad_input(pos.unsqueeze(1), self.indices, b, s)\n pos = pos.squeeze(2)\n if dims is not None:\n x = x.view(x.size(0), x.size(1), *dims)\n\n if return_mask and return_positions:\n return x, mask, pos\n elif return_mask:\n return x, mask\n elif return_positions:\n return x, pos\n else:\n return x\n\n @staticmethod\n def compute_indices(seqlens: torch.Tensor):\n indices_mask = get_mask(seqlens)\n indices = torch.nonzero(~indices_mask.flatten(), as_tuple=False).flatten()\n return indices" }, { "identifier": "PoET", "path": "poet/models/poet.py", "snippet": "class PoET(nn.Module, LogitsAllocateMemoryMixin):\n def __init__(\n self,\n n_vocab: int,\n hidden_dim: int = 768,\n ff_dim: Optional[int] = None,\n num_layers: int = 6,\n nhead: int = 12,\n dropout: float = 0,\n use_multi_rotary: bool = True,\n norm: bool = False,\n mask_token: int = 21, # kept just to maintain compatability with old models\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.hidden_dim = hidden_dim\n self.dropout = dropout\n\n self.token_embed = nn.Embedding(n_vocab, hidden_dim)\n # kept just to maintain compatability with old models\n self.rotary_emb = RotaryEmbedding(hidden_dim // nhead)\n\n ff_dim = ff_dim or 4 * hidden_dim\n\n self.decoder = TransformerEncoder(\n encoder_layer=TieredRotaryTransformerEncoderLayer(\n d_model=hidden_dim,\n nhead=nhead,\n dim_feedforward=ff_dim,\n dropout=dropout,\n use_multi_rotary=use_multi_rotary,\n batch_first=True,\n causal=True,\n ),\n num_layers=num_layers,\n )\n\n if norm:\n self.norm = nn.LayerNorm(hidden_dim)\n else:\n self.norm = nn.Identity()\n\n self.linear = nn.Linear(hidden_dim, n_vocab)\n\n def embed(\n self,\n xs: torch.Tensor,\n segment_sizes: torch.Tensor,\n allow_cpu_offload: bool = False,\n pbar_position: Optional[int] = None,\n ) -> list[PackedTensorSequences]:\n \"\"\"\n Returns the memory of each layer in a list. The memory is the input to the\n multi-sequence attention.\n\n Args:\n xs:\n (B, L) sequence of sequences\n segment_sizes:\n (B, N) the lengths of each sequence in the sequence of sequences\n allow_cpu_offload:\n whether or not memory should be offloaded to cpu if CUDA OOMs\n pbar_position:\n position of a tqdm progress bar if not None\n\n Returns:\n The memory. 
If allow_cpu_offload and there is insufficient GPU memory to\n store the tensors, the tensors will be stored in CPU memory instead.\n \"\"\"\n seqs_seqlens = segment_sizes.sum(dim=1).type(torch.int32)\n xs, _, _, _ = unpad_input(xs.unsqueeze(2), ~get_mask(seqs_seqlens))\n xs = xs.squeeze(1)\n h = self.token_embed.forward(xs)\n\n segment_sizes_cpu = segment_sizes.cpu()\n seqs_seqlens_cpu = segment_sizes_cpu.sum(dim=1).type(torch.int32)\n nonzero_segment_sizes_cpu = (\n segment_sizes_cpu[segment_sizes_cpu > 0].flatten().type(torch.int32)\n )\n cu_seqlens_cpu = F.pad(\n nonzero_segment_sizes_cpu.cumsum(\n dim=0, dtype=nonzero_segment_sizes_cpu.dtype\n ),\n (1, 0),\n )\n cu_seqlens = cu_seqlens_cpu.to(xs.device)\n h = PackedTensorSequences(\n packed_tensor=h,\n positions=torch.cat(\n [\n torch.arange(segment_size, dtype=xs.dtype, device=xs.device)\n for segment_size in nonzero_segment_sizes_cpu\n ]\n ),\n cu_seqlens=cu_seqlens,\n cu_seqlens_cpu=cu_seqlens_cpu,\n max_s=nonzero_segment_sizes_cpu.max(),\n # only needed for unpadding (used in standard attn)\n to_paddedable=False,\n indices=None,\n batch_size=None,\n )\n\n memory = []\n output_device: Optional[torch.device] = None\n if pbar_position is None:\n layers = self.decoder.layers\n else:\n layers = tqdm(\n self.decoder.layers,\n desc=f\"[{pbar_position}] encoding\",\n leave=False,\n position=pbar_position,\n )\n for layer in layers:\n layer: TieredRotaryTransformerEncoderLayer\n try:\n h, (_, _), (key, value) = layer.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens.dtype),\n (1, 0),\n ),\n return_memory=True,\n )\n if output_device is not None:\n key.x = key.x.to(output_device)\n value.x = value.x.to(output_device)\n except RuntimeError as e:\n if \"CUDA out of memory\" in str(e) and allow_cpu_offload:\n if pbar_position is not None:\n tqdm.write(\n \"OOMed during encoding, retrying by offloading to cpu\"\n )\n torch.cuda.empty_cache()\n output_device = torch.device(\"cpu\")\n for this_memory in memory:\n this_memory.x = this_memory.x.to(output_device)\n torch.cuda.empty_cache()\n h, (_, _), (key, value) = layer.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens.dtype),\n (1, 0),\n ),\n return_memory=True,\n )\n key.x = key.x.to(output_device)\n value.x = value.x.to(output_device)\n else:\n raise e\n memory.append(key)\n memory.append(value)\n return memory\n\n def logits(\n self,\n x: torch.Tensor,\n memory: Optional[list[PackedTensorSequences]],\n preallocated_memory: bool = False,\n return_embeddings: bool = False,\n ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n Compute the next token probability distributions given a precomputed memory\n (see self.embed and/or self.logits_allocate_memory).\n\n Args\n x:\n (B, L) sequence of sequences of tokens\n memory:\n output of self.embed\n if not preallocated_memory, has batch size 1 (it will be expanded if necessary)\n if memory is not on the same device as x, a copy of memory will be made to the\n device of x as necessary\n preallocated_memory:\n whether or not additional memory needed for this method was preallocated\n using self.logits_allocate_memory\n\n Returns:\n logits:\n (B, L, V) logits of the next token probability distributions. 
Here, V is\n the vocabulary size\n \"\"\"\n B, L_x = x.size()\n\n x: PackedTensorSequences = PackedTensorSequences.pack_input(x.unsqueeze(2))\n x.x = self.token_embed.forward(x.x.squeeze(1))\n\n x = _apply_causal_prefix_attention(\n decoder=self.decoder,\n x=x,\n batch_size=B,\n length=L_x,\n self_memory=None,\n memory=memory,\n preallocated_memory=preallocated_memory,\n )\n\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, L_x, -1)\n if not return_embeddings:\n return logits\n else:\n return logits, embeddings.view(B, L_x, -1)\n\n def sample(\n self,\n xs: torch.Tensor,\n segment_sizes: torch.Tensor,\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n batch_size: int = 1,\n ) -> tuple[torch.Tensor, float]:\n \"\"\"Sample batch_size sequences.\n\n Note: this implementation is out of date\n \"\"\"\n return self.sample_given_memory(\n memory=self.embed(xs, segment_sizes),\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n maxlen=maxlen,\n alphabet=alphabet,\n remove_invalid=remove_invalid,\n batch_size=batch_size,\n )\n\n @torch.inference_mode()\n def sample_given_memory(\n self,\n memory: Optional[list[PackedTensorSequences]],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n batch_size: int = 1,\n ) -> tuple[list[torch.Tensor], torch.Tensor]:\n \"\"\"Sample batch_size sequences from memory.\n\n Assumes memory represents one prompt, and samples each sequence from that one\n prompt.\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n Must only describe one sequence of sequences i.e. 
have a batch size of 1\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n batch_size:\n Number of sequences to sample in parallel\n\n Returns:\n A tuple (sample_xs, sample_scores), where sample_xs is a list containing the\n sampled sequences as tensors encoded by alphabet, and sample_scores is a\n tensor containing the negative log likelihood of each sampled sequence.\n \"\"\"\n criteria = nn.CrossEntropyLoss(\n ignore_index=alphabet.mask_token, reduction=\"none\"\n )\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits: Optional[torch.Tensor] = None\n\n # sample rest of x\n sampled_xs, sampled_scores = [], []\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).unsqueeze(1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n if remove_invalid:\n next_token_logits[:, invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits, top_k=top_k, top_p=top_p\n )\n next_token = torch.multinomial(\n next_token_logits.float().softmax(dim=-1), 1\n ).flatten()\n\n # update state\n current_token = next_token.unsqueeze(1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n if current_logits is None:\n current_logits = logits\n else:\n current_logits = torch.cat([current_logits, logits], dim=1)\n\n # apply sampling termination 
conditions\n is_stop_batch_filter = (\n (next_token == alphabet.stop_token)\n if current_x.size(1) < maxlen + 2\n else torch.ones((current_x.size(0),), dtype=torch.bool, device=device)\n )\n if is_stop_batch_filter.sum() > 0:\n is_stop_batch_idxs = torch.where(is_stop_batch_filter)[0]\n not_is_stop_batch_idxs = torch.where(~is_stop_batch_filter)[0]\n\n sampled_xs.extend(current_x[is_stop_batch_idxs].unbind())\n sampled_scores.append(\n -criteria.forward(\n current_logits[is_stop_batch_idxs].transpose(1, 2),\n current_x[is_stop_batch_idxs, 1:].cuda(),\n )\n .float()\n .sum(dim=1)\n )\n if is_stop_batch_idxs.numel() == current_x.size(0):\n break\n else:\n # remove terminated sequences from state\n _filter = not_is_stop_batch_idxs\n current_token = current_token[_filter]\n current_x = current_x[_filter]\n current_position = current_position[_filter]\n current_logits = current_logits[_filter]\n for idx in range(len(self_buffer)):\n self_buffer[idx] = self_buffer[idx][_filter]\n for idx in range(len(buffer)):\n buffer[idx] = buffer[idx][_filter]\n return sampled_xs, torch.hstack(sampled_scores)\n\n @torch.inference_mode()\n def sample_given_memories(\n self,\n memory: list[PackedTensorSequences],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n ) -> tuple[list[torch.Tensor], torch.Tensor]:\n \"\"\"Sample one sequence for each prompt described by memory.\n\n Unlike self.sample_given_memory, memory can represent multiple prompts.\n\n This method may have higher memory requirements than self.sample_given_memory\n and self.sample_given_memories_ensemble. Roughly speaking, it may allocate\n additional memory equal to the total memory used by `memory`, whereas the other\n methods may allocate additional memory equal to the memory used by only two\n items in `memory` e.g. memory[0] and memory[1].\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n\n Returns:\n A tuple (sample_xs, sample_scores), where sample_xs is a list containing the\n sampled sequences as tensors encoded by alphabet, and sample_scores is a\n tensor containing the negative log likelihood of each sampled sequence.\n\n The order of the samples corresponds to the order of the prompts i.e. 
the nth\n sample in sample_xs/sample_scores is sampled from the nth prompt in memory.\n \"\"\"\n criteria = nn.CrossEntropyLoss(\n ignore_index=alphabet.mask_token, reduction=\"none\"\n )\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n batch_size = memory[0].cu_seqlens.numel() - 1\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits: Optional[torch.Tensor] = None\n\n # sample rest of x\n sampled_xs, sampled_scores = [], []\n sampled_order, remaining_order = [], torch.arange(batch_size, device=device)\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n B = x.cu_seqlens.numel() - 1\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, 1, -1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n if remove_invalid:\n next_token_logits[:, invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits, top_k=top_k, top_p=top_p\n )\n next_token = torch.multinomial(\n next_token_logits.float().softmax(dim=-1), 1\n ).flatten()\n\n # update state\n current_token = next_token.unsqueeze(1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n if current_logits is None:\n current_logits = logits\n else:\n current_logits = torch.cat([current_logits, logits], dim=1)\n\n # apply sampling termination conditions\n is_stop_batch_filter = (\n (next_token == alphabet.stop_token)\n if current_x.size(1) < maxlen + 2\n else torch.ones((current_x.size(0),), dtype=torch.bool, device=device)\n )\n if is_stop_batch_filter.sum() > 0:\n is_stop_batch_idxs = torch.where(is_stop_batch_filter)[0]\n not_is_stop_batch_idxs = torch.where(~is_stop_batch_filter)[0]\n not_is_stop_batch_idxs_cpu = not_is_stop_batch_idxs.cpu()\n\n sampled_order.append(remaining_order[is_stop_batch_idxs])\n remaining_order = remaining_order[not_is_stop_batch_idxs]\n sampled_xs.extend(current_x[is_stop_batch_idxs].unbind())\n sampled_scores.append(\n -criteria.forward(\n current_logits[is_stop_batch_idxs].transpose(1, 2),\n current_x[is_stop_batch_idxs, 1:].cuda(),\n )\n .float()\n .sum(dim=1)\n )\n if is_stop_batch_idxs.numel() == current_x.size(0):\n break\n else:\n # remove terminated sequences from state\n _filter = not_is_stop_batch_idxs\n 
_filter_cpu = not_is_stop_batch_idxs_cpu\n current_token = current_token[_filter]\n current_x = current_x[_filter]\n current_position = current_position[_filter]\n current_logits = current_logits[_filter]\n for idx in range(len(self_buffer)):\n self_buffer[idx] = self_buffer[idx][_filter]\n for idx in range(len(buffer)):\n buffer[idx] = buffer[idx][_filter]\n\n new_start_idxs = memory[0].cu_seqlens_cpu[:-1][_filter_cpu]\n new_end_idxs = memory[0].cu_seqlens_cpu[1:][_filter_cpu]\n filtered_idxs = torch.hstack(\n [\n torch.arange(\n new_start_idxs[idx], new_end_idxs[idx], device=device\n )\n for idx in range(_filter.numel())\n ]\n )\n memory = [copy.copy(mem) for mem in memory]\n for mem in memory:\n mem.x = mem.x[filtered_idxs]\n mem.positions = mem.positions[filtered_idxs]\n mem.cu_seqlens = F.pad(\n mem.cu_seqlens.diff()[_filter].cumsum(\n dim=0, dtype=torch.int32\n ),\n (1, 0),\n )\n mem.cu_seqlens_cpu = F.pad(\n mem.cu_seqlens_cpu.diff()[_filter_cpu].cumsum(\n dim=0, dtype=torch.int32\n ),\n (1, 0),\n )\n mem.max_s = mem.cu_seqlens_cpu.diff().max()\n mem.to_paddedable = False\n\n # order sampled sequences by the order of the input memories\n sampled_order = torch.hstack(sampled_order).argsort()\n sampled_xs = [sampled_xs[i] for i in sampled_order]\n sampled_scores = torch.hstack(sampled_scores)[sampled_order]\n return sampled_xs, sampled_scores\n\n @torch.inference_mode()\n def sample_given_memories_ensemble(\n self,\n memory: list[PackedTensorSequences],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n ) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Sample one sequence by ensembling the prompts described by memory.\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n\n Returns:\n A tuple (sample_x, sample_scores), where sample_x is the sampled sequence\n encoded by alphabet, and sample_scores is a tensor containing the negative\n log likelihood of sample_x conditioned on each prompt in memory.\n \"\"\"\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n batch_size = memory[0].cu_seqlens.numel() - 1\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 
* len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits_sum = torch.zeros(\n (batch_size,), dtype=torch.float32, device=device\n )\n\n # sample rest of x\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n B = x.cu_seqlens.numel() - 1\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, 1, -1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n weights = current_logits_sum.softmax(dim=0)\n per_memory_next_token_logits = next_token_logits\n next_token_logits = (next_token_logits * weights.unsqueeze(1)).sum(dim=0)\n if remove_invalid:\n next_token_logits[invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits.unsqueeze(0), top_k=top_k, top_p=top_p\n ).squeeze(0)\n next_token = torch.multinomial(next_token_logits.float().softmax(dim=-1), 1)\n\n # update state\n current_token = next_token.unsqueeze(0).expand(batch_size, -1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n current_logits_sum += per_memory_next_token_logits[:, next_token].flatten()\n\n # apply sampling termination conditions\n if next_token == alphabet.stop_token or current_x.size(1) == maxlen + 2:\n return current_x[0], current_logits_sum\n\n def forward(self, xs: torch.Tensor, segment_sizes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Compute the next token probability distributions.\n\n Examples:\n Example input with batch size 1\n\n xs: [$ A B * $ A B C * $ E F]\n segment_sizes: [[4, 5, 3]]\n\n Note that the last sequence in a sequence of sequences does not need to have a\n stop token.\n\n Args:\n xs:\n (B, L) sequence of sequences of tokens\n segment_sizes:\n (B, N) the lengths of each sequence in the sequence of sequences\n\n Returns:\n (B, L, V) logits of the next token probability distributions. 
Here, V is\n the vocabulary size\n\n \"\"\"\n B, L = xs.size()\n\n seqs_seqlens = segment_sizes.sum(dim=1).type(torch.int32)\n xs, indices, _, _ = unpad_input(xs.unsqueeze(2), ~get_mask(seqs_seqlens))\n xs = xs.squeeze(1)\n h = self.token_embed.forward(xs)\n\n segment_sizes_cpu = segment_sizes.cpu()\n seqs_seqlens_cpu = segment_sizes_cpu.sum(dim=1).type(torch.int32)\n nonzero_segment_sizes_cpu = (\n segment_sizes_cpu[segment_sizes_cpu > 0].flatten().type(torch.int32)\n )\n cu_seqlens_cpu = F.pad(\n nonzero_segment_sizes_cpu.cumsum(\n dim=0, dtype=nonzero_segment_sizes_cpu.dtype\n ),\n (1, 0),\n )\n cu_seqlens = cu_seqlens_cpu.to(xs.device)\n h = PackedTensorSequences(\n packed_tensor=h,\n positions=torch.cat(\n [\n torch.arange(segment_size, dtype=xs.dtype, device=xs.device)\n for segment_size in nonzero_segment_sizes_cpu\n ]\n ),\n cu_seqlens=cu_seqlens,\n cu_seqlens_cpu=cu_seqlens_cpu,\n max_s=nonzero_segment_sizes_cpu.max(),\n # only needed for unpadding (used in standard attn)\n to_paddedable=False,\n indices=None,\n batch_size=None,\n )\n h = self.decoder.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens_cpu.dtype),\n (1, 0),\n ),\n )\n\n logits = self.linear.forward(self.norm(h.x))\n logits, _ = pad_input(logits, indices, B, L) # (B,L,num_tokens)\n return logits" }, { "identifier": "MSASampler", "path": "poet/msa/sampling.py", "snippet": "class MSASampler(BaseModel):\n # TODO: refactor msa sampling code...\n method: Union[TopSampler, RandomSampler, NeighborsSampler] = Field(\n ..., discriminator=\"sampler_type\"\n )\n force_include_first: bool = False\n max_similarity: float = 1.0\n max_dissimilarity: float = 1.0\n\n def _get_sim_filtered_idxs(self, msa: np.ndarray) -> np.ndarray:\n nonnormalized_sim = (msa == msa[[0]]).sum(axis=1)\n normfactor = msa.shape[1]\n norm_sim = nonnormalized_sim / normfactor\n\n assert (norm_sim.min() >= 0) and (norm_sim.max() <= 1)\n dsim = 1 - norm_sim\n\n max_sim_filter = norm_sim <= self.max_similarity\n max_dissim_filter = dsim <= self.max_dissimilarity\n return np.where(max_sim_filter & max_dissim_filter)[0]\n\n def get_sample_idxs(\n self,\n msa: np.ndarray,\n gap_token: int,\n seed: Optional[int] = None,\n result_cache_dir: Optional[Path] = None,\n ) -> np.ndarray:\n _, weights = self.method.get_weights(\n msa=msa, gap_token=gap_token, result_cache_dir=result_cache_dir\n )\n\n original_msa_sample_idxs = np.arange(len(msa))\n sample_idxs = self._get_sim_filtered_idxs(msa)\n original_msa_sample_idxs = original_msa_sample_idxs[sample_idxs]\n msa = msa[sample_idxs]\n weights = weights[sample_idxs]\n\n sample_idxs = self.method.get_sample_idxs(msa=msa, weights=weights, seed=seed)\n original_msa_sample_idxs = original_msa_sample_idxs[sample_idxs]\n del msa, weights\n\n if self.force_include_first:\n original_msa_sample_idxs = np.concatenate(\n [[0], original_msa_sample_idxs[original_msa_sample_idxs != 0]]\n )\n return original_msa_sample_idxs" }, { "identifier": "NeighborsSampler", "path": "poet/msa/sampling.py", "snippet": "class NeighborsSampler(BaseModel):\n sampler_type: Literal[\"neighbors\"] = \"neighbors\"\n theta: float = 0.2\n can_use_torch: bool = True\n\n def get_weights(\n self, msa: np.ndarray, gap_token: int, result_cache_dir: Optional[Path] = None\n ) -> tuple[Optional[float], Optional[np.ndarray]]:\n assert msa.dtype == np.uint8\n return compute_homology_weights(\n ungapped_msa=msa,\n 
theta=self.theta,\n gap_token=gap_token,\n gap_token_mask=255,\n result_cache_dir=result_cache_dir,\n can_use_torch=self.can_use_torch,\n )\n\n def get_sample_idxs(\n self,\n msa: np.ndarray,\n weights: Optional[np.ndarray] = None,\n seed: Optional[int] = None,\n ) -> np.ndarray:\n assert weights is not None\n if len(msa) == 0:\n return np.array([], dtype=int)\n size = len(msa)\n rng = np.random.default_rng(seed) if seed is not None else np.random\n return rng.choice(len(msa), replace=False, size=size, p=weights / weights.sum())" } ]
import argparse
import itertools
import string
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from typing import Callable, Optional, Sequence, TypeVar
from torch.nn.utils.rnn import pad_sequence
from tqdm import tqdm, trange
from poet.alphabets import Uniprot21
from poet.fasta import parse_stream
from poet.models.modules.packed_sequence import PackedTensorSequences
from poet.models.poet import PoET
from poet.msa.sampling import MSASampler, NeighborsSampler
11929
[torch.from_numpy(v).long() for v in this_variants], batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"):
ASCII_LOWERCASE_BYTES = string.ascii_lowercase.encode() PBAR_POSITION = 1 T = TypeVar("T", np.ndarray, torch.Tensor) def append_startstop(x: T, alphabet: Uniprot21) -> T: x_ndim = x.ndim assert x_ndim in {1, 2} if x_ndim == 1: x = x[None, :] if isinstance(x, torch.Tensor): empty_func = torch.empty else: empty_func = np.empty x_ = empty_func((x.shape[0], x.shape[1] + 2), dtype=x.dtype) x_[:, 0] = alphabet.start_token x_[:, -1] = alphabet.stop_token x_[:, 1:-1] = x if x_ndim == 1: x_ = x_.flatten() return x_ def get_seqs_from_fastalike(filepath: Path) -> list[bytes]: return [s for _, s in parse_stream(open(filepath, "rb"), upper=False)] def get_encoded_msa_from_a3m_seqs( msa_sequences: list[bytes], alphabet: Uniprot21 ) -> np.ndarray: return np.vstack( [ alphabet.encode(s.translate(None, delete=ASCII_LOWERCASE_BYTES)) for s in msa_sequences ] ) def sample_msa_sequences( get_sequence_fn: Callable[[int], bytes], sample_idxs: Sequence[int], max_tokens: int, alphabet: Uniprot21, shuffle: bool = True, shuffle_seed: Optional[int] = None, truncate: bool = True, ) -> list[np.ndarray]: assert alphabet.start_token != -1 assert alphabet.stop_token != -1 if not shuffle: assert shuffle_seed is None seqs, total_tokens = [], 0 for idx in sample_idxs: next_sequence = get_sequence_fn(idx) seqs.append(append_startstop(alphabet.encode(next_sequence), alphabet=alphabet)) total_tokens += len(seqs[-1]) if total_tokens > max_tokens: break # shuffle order and truncate to max tokens if shuffle: rng = ( np.random.default_rng(shuffle_seed) if shuffle_seed is not None else np.random ) final_permutation = rng.permutation(len(seqs)) else: final_permutation = np.arange(len(seqs)) final_seqs, total_tokens = [], 0 for seq in [seqs[i] for i in final_permutation]: if truncate and (total_tokens + len(seq) > max_tokens): seq = seq[: max_tokens - total_tokens] total_tokens += len(seq) final_seqs.append(seq) if total_tokens >= max_tokens: break return final_seqs def jit_warmup(embedding_model: PoET, alphabet: Uniprot21): x = b"$WAAAGH*$WAAGW*" segment_sizes = [8, 7] x = alphabet.encode(x) # encode x into the uniprot21 alphabet x = torch.from_numpy(x).long().cuda() segment_sizes = torch.tensor(segment_sizes).long().cuda() _ = embedding_model.embed(x.unsqueeze(0), segment_sizes.unsqueeze(0)) def _get_logps_tiered_fast( memory: Optional[list[PackedTensorSequences]], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: max_variant_length = max(len(v) for v in variants) memory = model.logits_allocate_memory( memory=memory, batch_size=batch_size, length=max_variant_length - 1, # discount stop token ) criteria = nn.CrossEntropyLoss(ignore_index=alphabet.mask_token, reduction="none") logps = [] if pbar_position is not None: pbar = trange( 0, len(variants), batch_size, desc=f"[{pbar_position}] decoding", leave=False, position=pbar_position, ) else: pbar = range(0, len(variants), batch_size) for start_idx in pbar: this_variants = variants[start_idx : start_idx + batch_size] this_variants = pad_sequence( [torch.from_numpy(v).long() for v in this_variants], batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = 
this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"):
sampler = MSASampler(
4
2023-10-28 01:30:26+00:00
16k
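For context, the row above records "sampler = MSASampler(" as its next_line. Below is a minimal, hypothetical sketch of how that call could continue, built only from the MSASampler / NeighborsSampler definitions and the script variables (msa, alphabet, args.seed, max_similarity) shown in this row; the specific argument values are illustrative assumptions, not the row's recorded gold completion.

from poet.msa.sampling import MSASampler, NeighborsSampler

# Assumes msa, alphabet, args, and max_similarity are already in scope, as in the
# row's script (inside its "for max_tokens, max_similarity in ..." ensemble loop).
sampler = MSASampler(
    method=NeighborsSampler(),      # homology-neighbor sequence weighting (theta defaults to 0.2)
    force_include_first=True,       # keep the query sequence at index 0 of the sampled MSA
    max_similarity=max_similarity,  # similarity cutoff taken from the ensemble parameter grid
)
sample_idxs = sampler.get_sample_idxs(
    msa=msa,
    gap_token=alphabet.gap_token,
    seed=args.seed,
)

The sampled indices would then typically be used to pick MSA rows (for example via the row's sample_msa_sequences helper, subject to the max_tokens budget) before scoring the variants.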
Transconnectome/SwiFT
interpretation/integrated_gradient.py
[ { "identifier": "SwinTransformer4D", "path": "project/module/models/swin4d_transformer_ver7.py", "snippet": "class SwinTransformer4D(nn.Module):\n \"\"\"\n Swin Transformer based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/abs/2103.14030>\"\n https://github.com/microsoft/Swin-Transformer\n \"\"\"\n\n def __init__(\n self,\n img_size: Tuple,\n in_chans: int,\n embed_dim: int,\n window_size: Sequence[int],\n first_window_size: Sequence[int],\n patch_size: Sequence[int],\n depths: Sequence[int],\n num_heads: Sequence[int],\n mlp_ratio: float = 4.0,\n qkv_bias: bool = True,\n drop_rate: float = 0.0,\n attn_drop_rate: float = 0.0,\n drop_path_rate: float = 0.0,\n norm_layer: Type[LayerNorm] = nn.LayerNorm,\n patch_norm: bool = False,\n use_checkpoint: bool = False,\n spatial_dims: int = 4,\n c_multiplier: int = 2,\n last_layer_full_MSA: bool = False,\n downsample=\"mergingv2\",\n num_classes=2,\n to_float: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n in_chans: dimension of input channels.\n embed_dim: number of linear projection output channels.\n window_size: local window size.\n patch_size: patch size.\n depths: number of layers in each stage.\n num_heads: number of attention heads.\n mlp_ratio: ratio of mlp hidden dim to embedding dim.\n qkv_bias: add a learnable bias to query, key, value.\n drop_rate: dropout rate.\n attn_drop_rate: attention dropout rate.\n drop_path_rate: stochastic depth rate.\n norm_layer: normalization layer.\n patch_norm: add normalization after patch embedding.\n use_checkpoint: use gradient checkpointing for reduced memory usage.\n spatial_dims: spatial dimension.\n downsample: module used for downsampling, available options are `\"mergingv2\"`, `\"merging\"` and a\n user-specified `nn.Module` following the API defined in :py:class:`monai.networks.nets.PatchMerging`.\n The default is currently `\"merging\"` (the original version defined in v0.9.0).\n\n\n c_multiplier: multiplier for the feature length after patch merging\n \"\"\"\n\n super().__init__()\n img_size = ensure_tuple_rep(img_size, spatial_dims)\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.patch_norm = patch_norm\n self.window_size = window_size\n self.first_window_size = first_window_size\n self.patch_size = patch_size\n self.to_float = to_float\n self.patch_embed = PatchEmbed(\n img_size=img_size,\n patch_size=self.patch_size,\n in_chans=in_chans,\n embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None, # type: ignore\n flatten=False,\n spatial_dims=spatial_dims,\n )\n grid_size = self.patch_embed.grid_size\n self.grid_size = grid_size\n self.pos_drop = nn.Dropout(p=drop_rate)\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]\n\n #patch_num = int((img_size[0]/patch_size[0]) * (img_size[1]/patch_size[1]) * (img_size[2]/patch_size[2]))\n #time_num = int(img_size[3]/patch_size[3])\n patch_dim = ((img_size[0]//patch_size[0]), (img_size[1]//patch_size[1]), (img_size[2]//patch_size[2]), (img_size[3]//patch_size[3]))\n\n #print img, patch size, patch dim\n print(\"img_size: \", img_size)\n print(\"patch_size: \", patch_size)\n print(\"patch_dim: \", patch_dim)\n self.pos_embeds = nn.ModuleList()\n pos_embed_dim = embed_dim\n for i in range(self.num_layers):\n self.pos_embeds.append(PositionalEmbedding(pos_embed_dim, patch_dim))\n pos_embed_dim = pos_embed_dim * c_multiplier\n patch_dim = (patch_dim[0]//2, patch_dim[1]//2, patch_dim[2]//2, patch_dim[3])\n\n # 
build layer\n self.layers = nn.ModuleList()\n down_sample_mod = look_up_option(downsample, MERGING_MODE) if isinstance(downsample, str) else downsample\n \n layer = BasicLayer(\n dim=int(embed_dim),\n depth=depths[0],\n num_heads=num_heads[0],\n window_size=self.first_window_size,\n drop_path=dpr[sum(depths[:0]) : sum(depths[: 0 + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=down_sample_mod if 0 < self.num_layers - 1 else None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n # exclude last layer\n for i_layer in range(1, self.num_layers - 1):\n layer = BasicLayer(\n dim=int(embed_dim * (c_multiplier**i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=self.window_size,\n drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=down_sample_mod if i_layer < self.num_layers - 1 else None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n if not last_layer_full_MSA:\n layer = BasicLayer(\n dim=int(embed_dim * c_multiplier ** (self.num_layers - 1)),\n depth=depths[(self.num_layers - 1)],\n num_heads=num_heads[(self.num_layers - 1)],\n window_size=self.window_size,\n drop_path=dpr[sum(depths[: (self.num_layers - 1)]) : sum(depths[: (self.num_layers - 1) + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n else:\n #################Full MSA for last layer#####################\n\n self.last_window_size = (\n self.grid_size[0] // int(2 ** (self.num_layers - 1)),\n self.grid_size[1] // int(2 ** (self.num_layers - 1)),\n self.grid_size[2] // int(2 ** (self.num_layers - 1)),\n self.window_size[3],\n )\n\n layer = BasicLayer_FullAttention(\n dim=int(embed_dim * c_multiplier ** (self.num_layers - 1)),\n depth=depths[(self.num_layers - 1)],\n num_heads=num_heads[(self.num_layers - 1)],\n # change the window size to the entire grid size\n window_size=self.last_window_size,\n drop_path=dpr[sum(depths[: (self.num_layers - 1)]) : sum(depths[: (self.num_layers - 1) + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n #############################################################\n\n self.num_features = int(embed_dim * c_multiplier ** (self.num_layers - 1))\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1) #\n self.head = nn.Linear(self.num_features, 1) if num_classes == 2 else num_classes\n\n\n def forward(self, x):\n\n #print model parameters\n # for name, param in self.named_parameters():\n # if param.requires_grad:\n # print(name, param.data.shape)\n\n if self.to_float:\n # converting tensor to float\n x = x.float()\n x = self.patch_embed(x)\n x = self.pos_drop(x) # (b, c, h, w, d, t)\n\n for i in range(self.num_layers):\n x = self.pos_embeds[i](x)\n x = self.layers[i](x.contiguous())\n\n # moved this part to clf_mlp or reg_mlp\n\n # x = x.flatten(start_dim=2).transpose(1, 2) # B L C\n # x = self.norm(x) # B L C\n # x = self.avgpool(x.transpose(1, 2)) # B C 
1\n # x = torch.flatten(x, 1)\n # x = self.head(x)\n\n return x" }, { "identifier": "LitClassifier", "path": "project/module/pl_classifier.py", "snippet": "class LitClassifier(pl.LightningModule):\n def __init__(self,data_module, **kwargs):\n super().__init__()\n self.save_hyperparameters(kwargs) # save hyperparameters except data_module (data_module cannot be pickled as a checkpoint)\n \n # you should define target_values at the Dataset classes\n target_values = data_module.train_dataset.target_values\n if self.hparams.label_scaling_method == 'standardization':\n scaler = StandardScaler()\n normalized_target_values = scaler.fit_transform(target_values)\n print(f'target_mean:{scaler.mean_[0]}, target_std:{scaler.scale_[0]}')\n elif self.hparams.label_scaling_method == 'minmax': \n scaler = MinMaxScaler()\n normalized_target_values = scaler.fit_transform(target_values)\n print(f'target_max:{scaler.data_max_[0]},target_min:{scaler.data_min_[0]}')\n self.scaler = scaler\n print(self.hparams.model)\n self.model = load_model(self.hparams.model, self.hparams)\n\n # Heads\n if not self.hparams.pretraining:\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n self.output_head = load_model(\"clf_mlp\", self.hparams)\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n self.output_head = load_model(\"reg_mlp\", self.hparams)\n elif self.hparams.use_contrastive:\n self.output_head = load_model(\"emb_mlp\", self.hparams)\n else:\n raise NotImplementedError(\"output head should be defined\")\n\n self.metric = Metrics()\n\n if self.hparams.adjust_thresh:\n self.threshold = 0\n\n def forward(self, x):\n return self.output_head(self.model(x))\n \n def augment(self, img):\n\n B, C, H, W, D, T = img.shape\n\n device = img.device\n img = rearrange(img, 'b c h w d t -> b t c h w d')\n\n rand_affine = monai_t.RandAffine(\n prob=1.0,\n # 0.175 rad = 10 degrees\n rotate_range=(0.175, 0.175, 0.175),\n scale_range = (0.1, 0.1, 0.1),\n mode = \"bilinear\",\n padding_mode = \"border\",\n device = device\n )\n rand_noise = monai_t.RandGaussianNoise(prob=0.3, std=0.1)\n rand_smooth = monai_t.RandGaussianSmooth(sigma_x=(0.0, 0.5), sigma_y=(0.0, 0.5), sigma_z=(0.0, 0.5), prob=0.1)\n if self.hparams.augment_only_intensity:\n comp = monai_t.Compose([rand_noise, rand_smooth])\n else:\n comp = monai_t.Compose([rand_affine, rand_noise, rand_smooth]) \n\n for b in range(B):\n aug_seed = torch.randint(0, 10000000, (1,)).item()\n # set augmentation seed to be the same for all time steps\n for t in range(T):\n if self.hparams.augment_only_affine:\n rand_affine.set_random_state(seed=aug_seed)\n img[b, t, :, :, :, :] = rand_affine(img[b, t, :, :, :, :])\n else:\n comp.set_random_state(seed=aug_seed)\n img[b, t, :, :, :, :] = comp(img[b, t, :, :, :, :])\n\n img = rearrange(img, 'b t c h w d -> b c h w d t')\n\n return img\n \n def _compute_logits(self, batch, augment_during_training=None):\n fmri, subj, target_value, tr, sex = batch.values()\n \n if augment_during_training:\n fmri = self.augment(fmri)\n\n feature = self.model(fmri)\n\n # Classification task\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n logits = self.output_head(feature).squeeze() #self.clf(feature).squeeze()\n target = target_value.float().squeeze()\n # 
Regression task\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n # target_mean, target_std = self.determine_target_mean_std()\n logits = self.output_head(feature) # (batch,1) or # tuple((batch,1), (batch,1))\n unnormalized_target = target_value.float() # (batch,1)\n if self.hparams.label_scaling_method == 'standardization': # default\n target = (unnormalized_target - self.scaler.mean_[0]) / (self.scaler.scale_[0])\n elif self.hparams.label_scaling_method == 'minmax':\n target = (unnormalized_target - self.scaler.data_min_[0]) / (self.scaler.data_max_[0] - self.scaler.data_min_[0])\n \n return subj, logits, target\n \n def _calculate_loss(self, batch, mode):\n if self.hparams.pretraining:\n fmri, subj, target_value, tr, sex = batch.values()\n \n cond1 = (self.hparams.in_chans == 1 and not self.hparams.with_voxel_norm)\n assert cond1, \"Wrong combination of options\"\n loss = 0\n\n if self.hparams.use_contrastive:\n assert self.hparams.contrastive_type != \"none\", \"Contrastive type not specified\"\n\n # B, C, H, W, D, T = image shape\n y, diff_y = fmri\n\n batch_size = y.shape[0]\n if (len(subj) != len(tuple(subj))) and mode == 'train':\n print('Some sub-sequences in a batch came from the same subject!')\n criterion = NTXentLoss(device='cuda', batch_size=batch_size,\n temperature=self.hparams.temperature,\n use_cosine_similarity=True).cuda()\n criterion_ll = NTXentLoss(device='cuda', batch_size=2,\n temperature=self.hparams.temperature,\n use_cosine_similarity=True).cuda()\n \n # type 1: IC\n # type 2: LL\n # type 3: IC + LL\n if self.hparams.contrastive_type in [1, 3]:\n out_global_1 = self.output_head(self.model(self.augment(y)),\"g\")\n out_global_2 = self.output_head(self.model(self.augment(diff_y)),\"g\")\n ic_loss = criterion(out_global_1, out_global_2)\n loss += ic_loss\n\n if self.hparams.contrastive_type in [2, 3]:\n out_local_1 = []\n out_local_2 = []\n out_local_swin1 = self.model(self.augment(y))\n out_local_swin2 = self.model(self.augment(y))\n out_local_1.append(self.output_head(out_local_swin1, \"l\"))\n out_local_2.append(self.output_head(out_local_swin2, \"l\"))\n\n out_local_swin1 = self.model(self.augment(diff_y))\n out_local_swin2 = self.model(self.augment(diff_y))\n out_local_1.append(self.output_head(out_local_swin1, \"l\"))\n out_local_2.append(self.output_head(out_local_swin2, \"l\"))\n\n ll_loss = 0\n # loop over batch size\n for i in range(out_local_1[0].shape[0]):\n # out_local shape should be: BS, n_local_clips, D\n ll_loss += criterion_ll(torch.stack(out_local_1, dim=1)[i],\n torch.stack(out_local_2, dim=1)[i])\n loss += ll_loss\n\n result_dict = {\n f\"{mode}_loss\": loss,\n } \n else:\n subj, logits, target = self._compute_logits(batch, augment_during_training = self.hparams.augment_during_training)\n\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n loss = F.binary_cross_entropy_with_logits(logits, target) # target is float\n acc = self.metric.get_accuracy_binary(logits, target.float().squeeze())\n result_dict = {\n f\"{mode}_loss\": loss,\n f\"{mode}_acc\": acc,\n }\n\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n loss = F.mse_loss(logits.squeeze(), target.squeeze())\n l1 = 
F.l1_loss(logits.squeeze(), target.squeeze())\n result_dict = {\n f\"{mode}_loss\": loss,\n f\"{mode}_mse\": loss,\n f\"{mode}_l1_loss\": l1\n }\n self.log_dict(result_dict, prog_bar=True, sync_dist=False, add_dataloader_idx=False, on_step=True, on_epoch=True, batch_size=self.hparams.batch_size) # batch_size = batch_size\n return loss\n\n def _evaluate_metrics(self, subj_array, total_out, mode):\n # print('total_out.device',total_out.device)\n # (total iteration/world_size) numbers of samples are passed into _evaluate_metrics.\n subjects = np.unique(subj_array)\n \n subj_avg_logits = []\n subj_targets = []\n for subj in subjects:\n #print('total_out.shape:',total_out.shape) # total_out.shape: torch.Size([16, 2])\n subj_logits = total_out[subj_array == subj,0] \n subj_avg_logits.append(torch.mean(subj_logits).item())\n subj_targets.append(total_out[subj_array == subj,1][0].item())\n subj_avg_logits = torch.tensor(subj_avg_logits, device = total_out.device) \n subj_targets = torch.tensor(subj_targets, device = total_out.device) \n \n \n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n if self.hparams.adjust_thresh:\n # move threshold to maximize balanced accuracy\n best_bal_acc = 0\n best_thresh = 0\n for thresh in np.arange(-5, 5, 0.01):\n bal_acc = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=thresh).int().cpu())\n if bal_acc > best_bal_acc:\n best_bal_acc = bal_acc\n best_thresh = thresh\n self.log(f\"{mode}_best_thresh\", best_thresh, sync_dist=True)\n self.log(f\"{mode}_best_balacc\", best_bal_acc, sync_dist=True)\n fpr, tpr, thresholds = roc_curve(subj_targets.cpu(), subj_avg_logits.cpu())\n idx = np.argmax(tpr - fpr)\n youden_thresh = thresholds[idx]\n acc_func = BinaryAccuracy().to(total_out.device)\n self.log(f\"{mode}_youden_thresh\", youden_thresh, sync_dist=True)\n self.log(f\"{mode}_youden_balacc\", balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=youden_thresh).int().cpu()), sync_dist=True)\n\n if mode == 'valid':\n self.threshold = youden_thresh\n elif mode == 'test':\n bal_acc = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=self.threshold).int().cpu())\n self.log(f\"{mode}_balacc_from_valid_thresh\", bal_acc, sync_dist=True)\n else:\n acc_func = BinaryAccuracy().to(total_out.device)\n \n auroc_func = BinaryAUROC().to(total_out.device)\n acc = acc_func((subj_avg_logits >= 0).int(), subj_targets)\n #print((subj_avg_logits>=0).int().cpu())\n #print(subj_targets.cpu())\n bal_acc_sk = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=0).int().cpu())\n auroc = auroc_func(torch.sigmoid(subj_avg_logits), subj_targets)\n\n self.log(f\"{mode}_acc\", acc, sync_dist=True)\n self.log(f\"{mode}_balacc\", bal_acc_sk, sync_dist=True)\n self.log(f\"{mode}_AUROC\", auroc, sync_dist=True)\n\n # regression target is normalized\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression': \n mse = F.mse_loss(subj_avg_logits, subj_targets)\n mae = F.l1_loss(subj_avg_logits, subj_targets)\n \n # reconstruct to original scale\n if self.hparams.label_scaling_method == 'standardization': # default\n adjusted_mse = F.mse_loss(subj_avg_logits * self.scaler.scale_[0] + self.scaler.mean_[0], subj_targets * self.scaler.scale_[0] + self.scaler.mean_[0])\n adjusted_mae = F.l1_loss(subj_avg_logits * self.scaler.scale_[0] + 
self.scaler.mean_[0], subj_targets * self.scaler.scale_[0] + self.scaler.mean_[0])\n elif self.hparams.label_scaling_method == 'minmax':\n adjusted_mse = F.mse_loss(subj_avg_logits * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0], subj_targets * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0])\n adjusted_mae = F.l1_loss(subj_avg_logits * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0], subj_targets * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0])\n pearson = PearsonCorrCoef().to(total_out.device)\n prearson_coef = pearson(subj_avg_logits, subj_targets)\n \n self.log(f\"{mode}_corrcoef\", prearson_coef, sync_dist=True)\n self.log(f\"{mode}_mse\", mse, sync_dist=True)\n self.log(f\"{mode}_mae\", mae, sync_dist=True)\n self.log(f\"{mode}_adjusted_mse\", adjusted_mse, sync_dist=True) \n self.log(f\"{mode}_adjusted_mae\", adjusted_mae, sync_dist=True) \n\n def training_step(self, batch, batch_idx):\n loss = self._calculate_loss(batch, mode=\"train\")\n return loss\n\n def validation_step(self, batch, batch_idx, dataloader_idx):\n if self.hparams.pretraining:\n if dataloader_idx == 0:\n self._calculate_loss(batch, mode=\"valid\")\n else:\n self._calculate_loss(batch, mode=\"test\")\n else:\n subj, logits, target = self._compute_logits(batch)\n if self.hparams.downstream_task_type == 'multi_task':\n output = torch.stack([logits[1].squeeze(), target], dim=1) # logits[1] : regression head\n else:\n output = torch.stack([logits.squeeze(), target.squeeze()], dim=1)\n return (subj, output)\n\n def validation_epoch_end(self, outputs):\n # called at the end of the validation epoch\n # outputs is an array with what you returned in validation_step for each batch\n # outputs = [{'loss': batch_0_loss}, {'loss': batch_1_loss}, ..., {'loss': batch_n_loss}] \n if not self.hparams.pretraining:\n outputs_valid = outputs[0]\n outputs_test = outputs[1]\n subj_valid = []\n subj_test = []\n out_valid_list = []\n out_test_list = []\n for subj, out in outputs_valid:\n subj_valid += subj\n out_valid_list.append(out.detach())\n for subj, out in outputs_test:\n subj_test += subj\n out_test_list.append(out.detach())\n subj_valid = np.array(subj_valid)\n subj_test = np.array(subj_test)\n total_out_valid = torch.cat(out_valid_list, dim=0)\n total_out_test = torch.cat(out_test_list, dim=0)\n\n # save model predictions if it is needed for future analysis\n # self._save_predictions(subj_valid,total_out_valid,mode=\"valid\")\n # self._save_predictions(subj_test,total_out_test, mode=\"test\") \n \n # evaluate \n self._evaluate_metrics(subj_valid, total_out_valid, mode=\"valid\")\n self._evaluate_metrics(subj_test, total_out_test, mode=\"test\")\n \n # If you use loggers other than Neptune you may need to modify this\n def _save_predictions(self,total_subjs,total_out, mode):\n self.subject_accuracy = {}\n for subj, output in zip(total_subjs,total_out):\n if self.hparams.downstream_task == 'sex':\n score = torch.sigmoid(output[0]).item()\n else:\n score = output[0].item()\n\n if subj not in self.subject_accuracy:\n self.subject_accuracy[subj] = {'score': [score], 'mode':mode, 'truth':output[1], 'count':1}\n else:\n self.subject_accuracy[subj]['score'].append(score)\n self.subject_accuracy[subj]['count']+=1\n \n if self.hparams.strategy == None : \n pass\n elif 'ddp' in self.hparams.strategy and len(self.subject_accuracy) > 0:\n world_size = torch.distributed.get_world_size()\n total_subj_accuracy = 
[None for _ in range(world_size)]\n torch.distributed.all_gather_object(total_subj_accuracy,self.subject_accuracy) # gather and broadcast to whole ranks \n accuracy_dict = {}\n for dct in total_subj_accuracy:\n for subj, metric_dict in dct.items():\n if subj not in accuracy_dict:\n accuracy_dict[subj] = metric_dict\n else:\n accuracy_dict[subj]['score']+=metric_dict['score']\n accuracy_dict[subj]['count']+=metric_dict['count']\n self.subject_accuracy = accuracy_dict\n if self.trainer.is_global_zero:\n for subj_name,subj_dict in self.subject_accuracy.items():\n subj_pred = np.mean(subj_dict['score'])\n subj_error = np.std(subj_dict['score'])\n subj_truth = subj_dict['truth'].item()\n subj_count = subj_dict['count']\n subj_mode = subj_dict['mode'] # train, val, test\n\n # only save samples at rank 0 (total iterations/world_size numbers are saved) \n os.makedirs(os.path.join('predictions',self.hparams.id), exist_ok=True)\n with open(os.path.join('predictions',self.hparams.id,'iter_{}.txt'.format(self.current_epoch)),'a+') as f:\n f.write('subject:{} ({})\\ncount: {} outputs: {:.4f}\\u00B1{:.4f} - truth: {}\\n'.format(subj_name,subj_mode,subj_count,subj_pred,subj_error,subj_truth))\n\n with open(os.path.join('predictions',self.hparams.id,'iter_{}.pkl'.format(self.current_epoch)),'wb') as fw:\n pickle.dump(self.subject_accuracy, fw)\n\n def test_step(self, batch, batch_idx):\n subj, logits, target = self._compute_logits(batch)\n output = torch.stack([logits.squeeze(), target.squeeze()], dim=1)\n return (subj, output)\n\n def test_epoch_end(self, outputs):\n if not self.hparams.pretraining:\n subj_test = [] \n out_test_list = []\n for subj, out in outputs:\n subj_test += subj\n out_test_list.append(out.detach())\n subj_test = np.array(subj_test)\n total_out_test = torch.cat(out_test_list, dim=0)\n # self._save_predictions(subj_test, total_out_test, mode=\"test\") \n self._evaluate_metrics(subj_test, total_out_test, mode=\"test\")\n \n def on_train_epoch_start(self) -> None:\n self.starter, self.ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n self.total_time = 0\n self.repetitions = 200\n self.gpu_warmup = 50\n self.timings=np.zeros((self.repetitions,1))\n return super().on_train_epoch_start()\n \n def on_train_batch_start(self, batch, batch_idx):\n if self.hparams.scalability_check:\n if batch_idx < self.gpu_warmup:\n pass\n elif (batch_idx-self.gpu_warmup) < self.repetitions:\n self.starter.record()\n return super().on_train_batch_start(batch, batch_idx)\n \n def on_train_batch_end(self, out, batch, batch_idx):\n if self.hparams.scalability_check:\n if batch_idx < self.gpu_warmup:\n pass\n elif (batch_idx-self.gpu_warmup) < self.repetitions:\n self.ender.record()\n torch.cuda.synchronize()\n curr_time = self.starter.elapsed_time(self.ender) / 1000\n self.total_time += curr_time\n self.timings[batch_idx-self.gpu_warmup] = curr_time\n elif (batch_idx-self.gpu_warmup) == self.repetitions:\n mean_syn = np.mean(self.timings)\n std_syn = np.std(self.timings)\n \n Throughput = (self.repetitions*self.hparams.batch_size*int(self.hparams.num_nodes) * int(self.hparams.devices))/self.total_time\n \n self.log(f\"Throughput\", Throughput, sync_dist=False)\n self.log(f\"mean_time\", mean_syn, sync_dist=False)\n self.log(f\"std_time\", std_syn, sync_dist=False)\n print('mean_syn:',mean_syn)\n print('std_syn:',std_syn)\n \n return super().on_train_batch_end(out, batch, batch_idx)\n\n\n # def on_before_optimizer_step(self, optimizer, optimizer_idx: int) -> None:\n\n def 
configure_optimizers(self):\n if self.hparams.optimizer == \"AdamW\":\n optim = torch.optim.AdamW(\n self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay\n )\n elif self.hparams.optimizer == \"SGD\":\n optim = torch.optim.SGD(\n self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay, momentum=self.hparams.momentum\n )\n else:\n print(\"Error: Input a correct optimizer name (default: AdamW)\")\n \n if self.hparams.use_scheduler:\n print()\n print(\"training steps: \" + str(self.trainer.estimated_stepping_batches))\n print(\"using scheduler\")\n print()\n total_iterations = self.trainer.estimated_stepping_batches # ((number of samples/batch size)/number of gpus) * num_epochs\n gamma = self.hparams.gamma\n base_lr = self.hparams.learning_rate\n warmup = int(total_iterations * 0.05) # adjust the length of warmup here.\n T_0 = int(self.hparams.cycle * total_iterations)\n T_mult = 1\n \n sche = CosineAnnealingWarmUpRestarts(optim, first_cycle_steps=T_0, cycle_mult=T_mult, max_lr=base_lr,min_lr=1e-9, warmup_steps=warmup, gamma=gamma)\n print('total iterations:',self.trainer.estimated_stepping_batches * self.hparams.max_epochs)\n\n scheduler = {\n \"scheduler\": sche,\n \"name\": \"lr_history\",\n \"interval\": \"step\",\n }\n\n return [optim], [scheduler]\n else:\n return optim\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False, formatter_class=ArgumentDefaultsHelpFormatter)\n group = parser.add_argument_group(\"Default classifier\")\n # training related\n group.add_argument(\"--grad_clip\", action='store_true', help=\"whether to use gradient clipping\")\n group.add_argument(\"--optimizer\", type=str, default=\"AdamW\", help=\"which optimizer to use [AdamW, SGD]\")\n group.add_argument(\"--use_scheduler\", action='store_true', help=\"whether to use scheduler\")\n group.add_argument(\"--weight_decay\", type=float, default=0.01, help=\"weight decay for optimizer\")\n group.add_argument(\"--learning_rate\", type=float, default=1e-3, help=\"learning rate for optimizer\")\n group.add_argument(\"--momentum\", type=float, default=0, help=\"momentum for SGD\")\n group.add_argument(\"--gamma\", type=float, default=1.0, help=\"decay for exponential LR scheduler\")\n group.add_argument(\"--cycle\", type=float, default=0.3, help=\"cycle size for CosineAnnealingWarmUpRestarts\")\n group.add_argument(\"--milestones\", nargs=\"+\", default=[100, 150], type=int, help=\"lr scheduler\")\n group.add_argument(\"--adjust_thresh\", action='store_true', help=\"whether to adjust threshold for valid/test\")\n \n # pretraining-related\n group.add_argument(\"--use_contrastive\", action='store_true', help=\"whether to use contrastive learning (specify --contrastive_type argument as well)\")\n group.add_argument(\"--contrastive_type\", default=0, type=int, help=\"combination of contrastive losses to use [1: Use the Instance contrastive loss function, 2: Use the local-local temporal contrastive loss function, 3: Use the sum of both loss functions]\")\n group.add_argument(\"--pretraining\", action='store_true', help=\"whether to use pretraining\")\n group.add_argument(\"--augment_during_training\", action='store_true', help=\"whether to augment input images during training\")\n group.add_argument(\"--augment_only_affine\", action='store_true', help=\"whether to only apply affine augmentation\")\n group.add_argument(\"--augment_only_intensity\", action='store_true', help=\"whether 
to only apply intensity augmentation\")\n group.add_argument(\"--temperature\", default=0.1, type=float, help=\"temperature for NTXentLoss\")\n \n # model related\n group.add_argument(\"--model\", type=str, default=\"none\", help=\"which model to be used\")\n group.add_argument(\"--in_chans\", type=int, default=1, help=\"Channel size of input image\")\n group.add_argument(\"--embed_dim\", type=int, default=24, help=\"embedding size (recommend to use 24, 36, 48)\")\n group.add_argument(\"--window_size\", nargs=\"+\", default=[4, 4, 4, 4], type=int, help=\"window size from the second layers\")\n group.add_argument(\"--first_window_size\", nargs=\"+\", default=[2, 2, 2, 2], type=int, help=\"first window size\")\n group.add_argument(\"--patch_size\", nargs=\"+\", default=[6, 6, 6, 1], type=int, help=\"patch size\")\n group.add_argument(\"--depths\", nargs=\"+\", default=[2, 2, 6, 2], type=int, help=\"depth of layers in each stage\")\n group.add_argument(\"--num_heads\", nargs=\"+\", default=[3, 6, 12, 24], type=int, help=\"The number of heads for each attention layer\")\n group.add_argument(\"--c_multiplier\", type=int, default=2, help=\"channel multiplier for Swin Transformer architecture\")\n group.add_argument(\"--last_layer_full_MSA\", type=str2bool, default=False, help=\"whether to use full-scale multi-head self-attention at the last layers\")\n group.add_argument(\"--clf_head_version\", type=str, default=\"v1\", help=\"clf head version, v2 has a hidden layer\")\n group.add_argument(\"--attn_drop_rate\", type=float, default=0, help=\"dropout rate of attention layers\")\n\n # others\n group.add_argument(\"--scalability_check\", action='store_true', help=\"whether to check scalability\")\n group.add_argument(\"--process_code\", default=None, help=\"Slurm code/PBS code. 
Use this argument if you want to save process codes to your log\")\n \n return parser" }, { "identifier": "fMRIDataModule", "path": "project/module/utils/data_module.py", "snippet": "class fMRIDataModule(pl.LightningDataModule):\n def __init__(self, **kwargs):\n super().__init__()\n self.save_hyperparameters()\n\n # generate splits folder\n if self.hparams.pretraining:\n split_dir_path = f'./data/splits/{self.hparams.dataset_name}/pretraining'\n else:\n split_dir_path = f'./data/splits/{self.hparams.dataset_name}'\n os.makedirs(split_dir_path, exist_ok=True)\n self.split_file_path = os.path.join(split_dir_path, f\"split_fixed_{self.hparams.dataset_split_num}.txt\")\n \n self.setup()\n\n #pl.seed_everything(seed=self.hparams.data_seed)\n\n def get_dataset(self):\n if self.hparams.dataset_name == \"Dummy\":\n return Dummy\n elif self.hparams.dataset_name == \"S1200\":\n return S1200\n elif self.hparams.dataset_name == \"ABCD\":\n return ABCD\n elif self.hparams.dataset_name == 'UKB':\n return UKB\n else:\n raise NotImplementedError\n\n def convert_subject_list_to_idx_list(self, train_names, val_names, test_names, subj_list):\n #subj_idx = np.array([str(x[0]) for x in subj_list])\n subj_idx = np.array([str(x[1]) for x in subj_list])\n S = np.unique([x[1] for x in subj_list])\n # print(S)\n print('unique subjects:',len(S)) \n train_idx = np.where(np.in1d(subj_idx, train_names))[0].tolist()\n val_idx = np.where(np.in1d(subj_idx, val_names))[0].tolist()\n test_idx = np.where(np.in1d(subj_idx, test_names))[0].tolist()\n return train_idx, val_idx, test_idx\n \n def save_split(self, sets_dict):\n with open(self.split_file_path, \"w+\") as f:\n for name, subj_list in sets_dict.items():\n f.write(name + \"\\n\")\n for subj_name in subj_list:\n f.write(str(subj_name) + \"\\n\")\n \n def determine_split_randomly(self, S):\n S = list(S.keys())\n S_train = int(len(S) * self.hparams.train_split)\n S_val = int(len(S) * self.hparams.val_split)\n S_train = np.random.choice(S, S_train, replace=False)\n remaining = np.setdiff1d(S, S_train) # np.setdiff1d(np.arange(S), S_train)\n S_val = np.random.choice(remaining, S_val, replace=False)\n S_test = np.setdiff1d(S, np.concatenate([S_train, S_val])) # np.setdiff1d(np.arange(S), np.concatenate([S_train, S_val]))\n # train_idx, val_idx, test_idx = self.convert_subject_list_to_idx_list(S_train, S_val, S_test, self.subject_list)\n self.save_split({\"train_subjects\": S_train, \"val_subjects\": S_val, \"test_subjects\": S_test})\n return S_train, S_val, S_test\n \n def load_split(self):\n subject_order = open(self.split_file_path, \"r\").readlines()\n subject_order = [x[:-1] for x in subject_order]\n train_index = np.argmax([\"train\" in line for line in subject_order])\n val_index = np.argmax([\"val\" in line for line in subject_order])\n test_index = np.argmax([\"test\" in line for line in subject_order])\n train_names = subject_order[train_index + 1 : val_index]\n val_names = subject_order[val_index + 1 : test_index]\n test_names = subject_order[test_index + 1 :]\n return train_names, val_names, test_names\n\n def prepare_data(self):\n # This function is only called at global rank==0\n return\n \n # filter subjects with metadata and pair subject names with their target values (+ sex)\n def make_subject_dict(self):\n # output: {'subj1':[target1,target2],'subj2':[target1,target2]...}\n img_root = os.path.join(self.hparams.image_path, 'img')\n final_dict = dict()\n if self.hparams.dataset_name == \"S1200\":\n subject_list = os.listdir(img_root)\n meta_data = 
pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_gender.csv\"))\n meta_data_residual = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_precise_age.csv\"))\n meta_data_all = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_all.csv\"))\n if self.hparams.downstream_task == 'sex': task_name = 'Gender'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_total': task_name = 'CogTotalComp_AgeAdj'\n else: raise NotImplementedError()\n\n if self.hparams.downstream_task == 'sex':\n meta_task = meta_data[['Subject',task_name]].dropna()\n elif self.hparams.downstream_task == 'age':\n meta_task = meta_data_residual[['subject',task_name,'sex']].dropna()\n #rename column subject to Subject\n meta_task = meta_task.rename(columns={'subject': 'Subject'})\n elif self.hparams.downstream_task == 'int_total':\n meta_task = meta_data[['Subject',task_name,'Gender']].dropna() \n \n for subject in subject_list:\n if int(subject) in meta_task['Subject'].values:\n if self.hparams.downstream_task == 'sex':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n target = 1 if target == \"M\" else 0\n sex = target\n elif self.hparams.downstream_task == 'age':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n sex = meta_task[meta_task[\"Subject\"]==int(subject)][\"sex\"].values[0]\n sex = 1 if sex == \"M\" else 0\n elif self.hparams.downstream_task == 'int_total':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n sex = meta_task[meta_task[\"Subject\"]==int(subject)][\"Gender\"].values[0]\n sex = 1 if sex == \"M\" else 0\n final_dict[subject]=[sex,target]\n \n elif self.hparams.dataset_name == \"ABCD\":\n subject_list = [subj[4:] for subj in os.listdir(img_root)]\n \n meta_data = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"ABCD_phenotype_total.csv\"))\n if self.hparams.downstream_task == 'sex': task_name = 'sex'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_total': task_name = 'nihtbx_totalcomp_uncorrected'\n else: raise ValueError('downstream task not supported')\n \n if self.hparams.downstream_task == 'sex':\n meta_task = meta_data[['subjectkey',task_name]].dropna()\n else:\n meta_task = meta_data[['subjectkey',task_name,'sex']].dropna()\n \n for subject in subject_list:\n if subject in meta_task['subjectkey'].values:\n target = meta_task[meta_task[\"subjectkey\"]==subject][task_name].values[0]\n sex = meta_task[meta_task[\"subjectkey\"]==subject][\"sex\"].values[0]\n final_dict[subject]=[sex,target]\n \n elif self.hparams.dataset_name == \"UKB\":\n if self.hparams.downstream_task == 'sex': task_name = 'sex'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_fluid' : task_name = 'fluid'\n else: raise ValueError('downstream task not supported')\n \n meta_data = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"UKB_phenotype_gps_fluidint.csv\"))\n if task_name == 'sex':\n meta_task = meta_data[['eid',task_name]].dropna()\n else:\n meta_task = meta_data[['eid',task_name,'sex']].dropna()\n\n for subject in os.listdir(img_root):\n if subject.endswith('20227_2_0') and (int(subject[:7]) in meta_task['eid'].values):\n target = meta_task[meta_task[\"eid\"]==int(subject[:7])][task_name].values[0]\n sex = 
meta_task[meta_task[\"eid\"]==int(subject[:7])].values[0]\n final_dict[str(subject[:7])] = [sex,target]\n else:\n continue \n \n return final_dict\n\n def setup(self, stage=None):\n # this function will be called at each devices\n Dataset = self.get_dataset()\n params = {\n \"root\": self.hparams.image_path,\n \"sequence_length\": self.hparams.sequence_length,\n \"contrastive\":self.hparams.use_contrastive,\n \"contrastive_type\":self.hparams.contrastive_type,\n \"stride_between_seq\": self.hparams.stride_between_seq,\n \"stride_within_seq\": self.hparams.stride_within_seq,\n \"with_voxel_norm\": self.hparams.with_voxel_norm,\n \"downstream_task\": self.hparams.downstream_task,\n \"shuffle_time_sequence\": self.hparams.shuffle_time_sequence,\n \"input_type\": self.hparams.input_type,\n \"label_scaling_method\" : self.hparams.label_scaling_method,\n \"dtype\":'float16'}\n \n subject_dict = self.make_subject_dict()\n if os.path.exists(self.split_file_path):\n train_names, val_names, test_names = self.load_split()\n else:\n train_names, val_names, test_names = self.determine_split_randomly(subject_dict)\n \n if self.hparams.bad_subj_path:\n bad_subjects = open(self.hparams.bad_subj_path, \"r\").readlines()\n for bad_subj in bad_subjects:\n bad_subj = bad_subj.strip()\n if bad_subj in list(subject_dict.keys()):\n print(f'removing bad subject: {bad_subj}')\n del subject_dict[bad_subj]\n \n if self.hparams.limit_training_samples:\n train_names = np.random.choice(train_names, size=self.hparams.limit_training_samples, replace=False, p=None)\n \n train_dict = {key: subject_dict[key] for key in train_names if key in subject_dict}\n val_dict = {key: subject_dict[key] for key in val_names if key in subject_dict}\n test_dict = {key: subject_dict[key] for key in test_names if key in subject_dict}\n \n self.train_dataset = Dataset(**params,subject_dict=train_dict,use_augmentations=False, train=True)\n # load train mean/std of target labels to val/test dataloader\n self.val_dataset = Dataset(**params,subject_dict=val_dict,use_augmentations=False,train=False) \n self.test_dataset = Dataset(**params,subject_dict=test_dict,use_augmentations=False,train=False) \n \n print(\"number of train_subj:\", len(train_dict))\n print(\"number of val_subj:\", len(val_dict))\n print(\"number of test_subj:\", len(test_dict))\n print(\"length of train_idx:\", len(self.train_dataset.data))\n print(\"length of val_idx:\", len(self.val_dataset.data)) \n print(\"length of test_idx:\", len(self.test_dataset.data))\n \n # DistributedSampler is internally called in pl.Trainer\n def get_params(train):\n return {\n \"batch_size\": self.hparams.batch_size if train else self.hparams.eval_batch_size,\n \"num_workers\": self.hparams.num_workers,\n \"drop_last\": True,\n \"pin_memory\": False,\n \"persistent_workers\": False if self.hparams.dataset_name == 'Dummy' else (train and (self.hparams.strategy == 'ddp')),\n \"shuffle\": train\n }\n self.train_loader = DataLoader(self.train_dataset, **get_params(train=True))\n self.val_loader = DataLoader(self.val_dataset, **get_params(train=False))\n self.test_loader = DataLoader(self.test_dataset, **get_params(train=False))\n \n\n def train_dataloader(self):\n return self.train_loader\n\n def val_dataloader(self):\n # return self.val_loader\n # currently returns validation and test set to track them during training\n return [self.val_loader, self.test_loader]\n\n def test_dataloader(self):\n return self.test_loader\n\n def predict_dataloader(self):\n return self.test_dataloader()\n\n 
@classmethod\n def add_data_specific_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n parser = ArgumentParser(parents=[parent_parser], add_help=True, formatter_class=ArgumentDefaultsHelpFormatter)\n group = parser.add_argument_group(\"DataModule arguments\")\n group.add_argument(\"--dataset_split_num\", type=int, default=1) # dataset split, choose from 1, 2, or 3\n group.add_argument(\"--label_scaling_method\", default=\"standardization\", choices=[\"minmax\",\"standardization\"], help=\"label normalization strategy for a regression task (mean and std are automatically calculated using train set)\")\n group.add_argument(\"--image_path\", default=None, help=\"path to image datasets preprocessed for SwiFT\")\n group.add_argument(\"--bad_subj_path\", default=None, help=\"path to txt file that contains subjects with bad fMRI quality\")\n group.add_argument(\"--input_type\", default=\"rest\",choices=['rest','task'],help='refer to datasets.py')\n group.add_argument(\"--train_split\", default=0.7, type=float)\n group.add_argument(\"--val_split\", default=0.15, type=float)\n group.add_argument(\"--batch_size\", type=int, default=4)\n group.add_argument(\"--eval_batch_size\", type=int, default=16)\n group.add_argument(\"--img_size\", nargs=\"+\", default=[96, 96, 96, 20], type=int, help=\"image size (adjust the fourth dimension according to your --sequence_length argument)\")\n group.add_argument(\"--sequence_length\", type=int, default=20)\n group.add_argument(\"--stride_between_seq\", type=int, default=1, help=\"skip some fMRI volumes between fMRI sub-sequences\")\n group.add_argument(\"--stride_within_seq\", type=int, default=1, help=\"skip some fMRI volumes within fMRI sub-sequences\")\n group.add_argument(\"--num_workers\", type=int, default=8)\n group.add_argument(\"--with_voxel_norm\", type=str2bool, default=False)\n group.add_argument(\"--shuffle_time_sequence\", action='store_true')\n group.add_argument(\"--limit_training_samples\", type=int, default=None, help=\"use if you want to limit training samples\")\n return parser" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import json
import numpy as np
import torchvision
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from matplotlib.colors import LinearSegmentedColormap
from torchvision import models
from torchvision import transforms
from captum.attr import IntegratedGradients
from captum.attr import GradientShap
from captum.attr import Occlusion
from captum.attr import NoiseTunnel
from captum.attr import visualization as viz
from matplotlib.colors import LogNorm
from project.module.models.swin4d_transformer_ver7 import SwinTransformer4D
from project.module.pl_classifier import LitClassifier
from project.module.utils.data_module import fMRIDataModule
from pathlib import Path
12,871
save_dir = # write path to save_dir
jobid = # write project number
neptune_project_id = # write project id. ex)user_id/project_name

for i in Path(f'SwiFT/output/{neptune_project_id}/RSTOT-{jobid}/').glob('checkpt*'):
    ckpt_path = i

ckpt = torch.load(ckpt_path, map_location='cuda:0' if torch.cuda.is_available() else 'cpu')
ckpt['hyper_parameters']['image_path'] = # write path to MNI_to_TRs folder
ckpt['hyper_parameters']['default_root_dir'] = # write path to use default_root_dir
ckpt['hyper_parameters']['shuffle_time_sequence'] = False
ckpt['hyper_parameters']['time_as_channel'] = False
ckpt['hyper_parameters']['eval_batch_size'] = 1
args = ckpt['hyper_parameters']
model = LitClassifier(**args)
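Note on the completion above: model = LitClassifier(**args) only re-instantiates the module from the patched hyper-parameters; the trained weights stored in the checkpoint are not restored by that line. Below is a minimal sketch of the usual follow-up, not taken from the dataset row itself; the strict=False flag and the eval() call are assumptions added for illustration.

# Hedged sketch: restore the saved weights after rebuilding the classifier.
import torch
from project.module.pl_classifier import LitClassifier

ckpt = torch.load(ckpt_path, map_location='cpu')          # ckpt_path as set in the cropped code
model = LitClassifier(**ckpt['hyper_parameters'])         # mirrors the gold next_line
model.load_state_dict(ckpt['state_dict'], strict=False)   # strict=False is an assumption
model.eval()

PyTorch Lightning also provides LitClassifier.load_from_checkpoint(ckpt_path, **overrides), which rebuilds the module and loads its weights in one call; the record above instead patches the stored hyper-parameters by hand before constructing the model.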
1
2023-10-28 09:26:03+00:00
16k
TheCompAce/ShellSpeak
main.py
[ { "identifier": "VectorDatabase", "path": "modules/vectorDatabase.py", "snippet": "class VectorDatabase:\n def __init__(self, path, name):\n self.path = path\n self.name = name\n self.db_path = os.path.join(path, f'{name}.db')\n self.model_path = os.path.join(path, f'{name}.bin')\n \n # Ensure the path exists\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Set up database and model connections\n self.conn = self.initialize_db()\n self.model = self.initialize_model()\n \n def initialize_db(self):\n try:\n conn = sqlite3.connect(self.db_path)\n c = conn.cursor()\n \n c.execute('CREATE TABLE IF NOT EXISTS responses (id INTEGER PRIMARY KEY, response TEXT, response_raw TEXT, trained BOOLEAN DEFAULT 0)')\n c.execute('CREATE INDEX IF NOT EXISTS idx_responses_trained ON responses (trained)') # Index on trained field\n c.execute('CREATE TABLE IF NOT EXISTS vector_data (id INTEGER PRIMARY KEY, vector BLOB, response_id INTEGER, FOREIGN KEY(response_id) REFERENCES responses(id))')\n c.execute('CREATE INDEX IF NOT EXISTS idx_vector_data_response_id ON vector_data (response_id)') # Index on response_id field\n\n # Check if timestamp column exists\n c.execute(\"PRAGMA table_info(responses)\")\n columns = [column[1] for column in c.fetchall()]\n if 'timestamp' not in columns:\n c.execute('ALTER TABLE responses ADD COLUMN timestamp DATETIME DEFAULT CURRENT_TIMESTAMP')\n \n conn.commit()\n\n return conn # Return the connection\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_db: {e}\")\n\n \n def initialize_model(self):\n try:\n # Create a new Word2Vec model if it doesn't exist\n if not os.path.exists(self.model_path):\n # Assuming sentences is your data\n # Replace the following line with your data and model parameters\n sentences = [[\"hello\", \"world\"], [\"how\", \"are\", \"you\"], [\"goodbye\", \"world\"]]\n\n model = Word2Vec(sentences, min_count=1)\n model.save(self.model_path)\n else:\n model = Word2Vec.load(self.model_path)\n\n return model # Return the model\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_model: {e}\")\n\n def store_short_term_memory(self, task_id, data):\n # Convert data to a string or JSON format\n data_str = json.dumps(data)\n # Store the data as a response in the VectorDatabase\n self.vector_db.create_response(data_str)\n\n def store_long_term_memory(self, task_data):\n # Convert task_data to a string or JSON format\n task_data_str = json.dumps(task_data)\n # Store the task_data as a response in the VectorDatabase\n self.create_response(task_data_str)\n\n def ensure_connection(self):\n if self.conn is None:\n self.conn = self.initialize_db()\n if self.model is None:\n self.model = self.initialize_model()\n \n def create_response(self, response_text):\n try:\n c = self.conn.cursor()\n \n preprocess_text = self.preprocess_text(response_text)\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n c.execute('INSERT INTO responses (response, response_raw, timestamp) VALUES (?, ?, ?)', (preprocess_text, response_text, now))\n response_id = c.lastrowid\n\n # Updated word check in vocabulary\n words = preprocess_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('INSERT INTO vector_data (vector, response_id) VALUES (?, ?)', (vector_bytes, response_id))\n else:\n logging.info(\"No valid words found in the response for 
vectorization.\")\n\n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in create_response: {e}\")\n\n \n def search_response(self, search_text):\n c = self.conn.cursor()\n \n # Use the LIKE operator to search for the search_text in the response field\n c.execute(\"SELECT id, response FROM responses WHERE response LIKE ?\", ('%' + search_text + '%',))\n search_results = c.fetchall()\n c.close()\n return search_results\n \n def normalize_text(self, text):\n # Convert to lowercase\n text = text.lower()\n # Replace newline characters with spaces\n text = text.replace('\\\\n', ' ')\n # Remove special characters and digits using regex\n text = re.sub(r'[^a-zA-Z\\s]', ' ', text)\n # Remove extra whitespaces\n text = re.sub(r'\\s+', ' ', text).strip()\n # Tokenize the text\n tokens = text.split()\n # Remove stopwords\n tokens = [word for word in tokens if word not in stopwords.words('english')]\n # Perform stemming\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(word) for word in tokens]\n # Join tokens back into a single string\n text = ' '.join(tokens)\n return text\n\n def close_connection(self):\n \"\"\"Close the database connection gracefully.\"\"\"\n try:\n if self.conn:\n self.conn.close()\n\n self.conn = None\n except Exception as e:\n logging.exception(f\"An error occurred while closing the connection: {e}\")\n\n\n def preprocess_text(self, text):\n \"\"\"Example preprocessing function (can be expanded).\"\"\"\n # Placeholder for any preprocessing steps you want to implement\n return self.normalize_text(text)\n\n def get_vector(self, response_id):\n \"\"\"Retrieve vector data for a given response_id.\"\"\"\n c = self.conn.cursor()\n \n c.execute('SELECT vector FROM vector_data WHERE response_id = ?', (response_id,))\n vector_data = c.fetchone()\n c.close()\n \n if vector_data is None:\n error_message = f\"No vector data found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n vector = np.frombuffer(vector_data[0], dtype=np.float32) # Assuming the vector data is stored as float32\n \n return vector\n\n \n def read_response(self, response_id):\n c = self.conn.cursor()\n \n c.execute('SELECT response FROM responses WHERE id = ?', (response_id,))\n response = c.fetchone()\n\n c.close()\n \n if response is None:\n error_message = f\"No response found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n return response[0]\n \n def update_response(self, response_id, new_response_text):\n try:\n c = self.conn.cursor()\n \n normalized_text = self.preprocess(new_response_text)\n c.execute('UPDATE responses SET response = ? WHERE id = ?', (normalized_text, response_id))\n \n # Check if each word is in the model's vocabulary\n words = normalized_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('UPDATE vector_data SET vector = ? 
WHERE response_id = ?', (vector_bytes, response_id))\n \n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in update_response: {e}\")\n \n def delete_response(self, response_id):\n try:\n c = self.conn.cursor()\n \n c.execute('DELETE FROM vector_data WHERE response_id = ?', (response_id,))\n c.execute('DELETE FROM responses WHERE id = ?', (response_id,))\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in delete_response: {e}\")\n\n def train_untrained_responses(self):\n try:\n c = self.conn.cursor()\n\n c.execute(\"SELECT response FROM responses WHERE trained = 0\")\n untrained_responses = c.fetchall()\n if untrained_responses:\n sentences = [response[0].split() for response in untrained_responses]\n\n self.model.build_vocab(sentences, update=True)\n self.model.train(sentences, total_examples=len(sentences), epochs=self.model.epochs)\n\n self.model.save(self.model_path)\n\n c.execute(\"UPDATE responses SET trained = 1 WHERE trained = 0\")\n\n self.conn.commit()\n else:\n logging.info(\"No untrained responses found.\")\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in train_untrained_responses: {e}\")\n\n def needs_index_update(self):\n \"\"\"\n Check if there are any untrained responses in the database.\n If there are, it means the index needs to be updated.\n Returns True if update is needed, False otherwise.\n \"\"\"\n try:\n c = self.conn.cursor()\n c.execute(\"SELECT COUNT(*) FROM responses WHERE trained = 0\")\n count = c.fetchone()[0]\n c.close() # Manually close the cursor\n return count > 0\n except Exception as e:\n logging.exception(f\"An error occurred in needs_index_update: {e}\")\n return False # In case of an error, you might want to handle it differently\n\n \n def reset_training_status(self):\n \"\"\"Reset the trained status of all responses to untrained.\"\"\"\n try:\n c = self.conn.cursor()\n \n c.execute(\"UPDATE responses SET trained = 0\")\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in reset_training_status: {e}\")\n\n\n def search_word_vector(self, word):\n try:\n if word in self.model.wv.key_to_index:\n similar_words = self.model.wv.similar_by_word(word)\n return similar_words\n else:\n logging.error(f\"The word {word} is not in the model's vocabulary.\")\n return []\n except Exception as e:\n logging.exception(f\"An error occurred in search_word_vector: {e}\")\n return []\n\n def get_vector_average(self, text):\n words = text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector_avg = np.mean(vectors, axis=0)\n return vector_avg\n else:\n return np.zeros(self.model.vector_size)\n\n def search_similar_conversations(self, text, top_n=1):\n processed_text = self.preprocess_text(text)\n print(f\"processed_text = {processed_text}\")\n\n query_vector = self.get_vector_average(processed_text)\n with self.conn:\n c = self.conn.cursor()\n c.execute('SELECT id, vector FROM vector_data')\n vector_data = c.fetchall()\n\n if not vector_data:\n return []\n\n ids, vectors = zip(*vector_data)\n vectors = np.array([np.frombuffer(vector, dtype=np.float32) for vector in vectors])\n similarities = cosine_similarity([query_vector], vectors)[0]\n sorted_indices = np.argsort(similarities)[::-1]\n top_indices = sorted_indices[:top_n]\n top_ids = [ids[i] for i in top_indices]\n top_similarities = [similarities[i] for i in 
top_indices]\n\n result = []\n for response_id, similarity in zip(top_ids, top_similarities):\n # Fetch the corresponding response text for each response_id\n c.execute('SELECT response_raw FROM response_raw WHERE id = ?', (response_id,))\n response_text = c.fetchone()\n if response_text is not None:\n response_text = response_text[0] # Extracting text from the tuple\n # result.append((response_id, response_text, similarity))\n result.append(response_text)\n\n return result" }, { "identifier": "save_settings", "path": "modules/menus/setup_menu.py", "snippet": "def setup_menu():" }, { "identifier": "ShellSpeak", "path": "modules/shellSpeak.py", "snippet": "class ShellSpeak:\n def __init__(self, settings, base_path, vectorDb):\n self.llm_len = int(settings.get(\"llm_size\", 14000))\n self.llm_history_len = int(settings.get(\"llm_history_size\", 4000))\n self.llm_file_len = int(settings.get(\"llm_file_size\", 4000))\n self.llm_folder_len = int(settings.get(\"llm_folder_size\", 4000))\n self.llm_slide_len = int(settings.get(\"llm_slide_len\", 120))\n\n self.temp_file = settings.get(\"temp_file\", \"temp\")\n\n self.llm_output_size = int(settings.get(\"llm_output_size\", 4097))\n self.use_cache = settings.get(\"use_cache\", False)\n self.cache_file = settings.get(\"cache_file\", None)\n\n self.vector_for_commands = settings.get(\"vector_for_commands\", False)\n self.vector_for_history = settings.get(\"vector_for_history\", True)\n self.vector_for_folders = settings.get(\"vector_for_folders\", True)\n\n self.data_file = 'path_to_your_data_file.json'\n self.use_indexing = settings.get('use_indexing', False)\n\n self.vector_db = vectorDb\n\n self.settings = settings\n self.command_history = \"\"\n self.settingsRoot = base_path\n\n self.files = []\n\n self.llm = LLM(model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), use_cache=self.use_cache, cache_file=self.cache_file) #Zephyr7bBeta\n\n self.command_runner = CommandRunner(self)\n\n logging.info(f\"Shell Speak Loaded\")\n\n def capture_input(self):\n # Get current working directory\n current_directory = os.getcwd()\n \n # Get environment (if available)\n environment = os.environ.get('VIRTUAL_ENV', None)\n if environment:\n environment = os.path.basename(environment) # Extracting last part of the path as environment name\n \n # Formatted prompt\n prompt = f\"[green]({environment})[cyan] {current_directory}[white]>\" if environment else f\"{current_directory}{self.settings['command_prompt']}\"\n \n set_input = capture_styled_input(prompt)\n logging.info(f\"Using input : {set_input}\")\n return set_input\n \n def show_file(self, caption, body):\n print_colored_text(f\"[yellow]==== {caption} ====\")\n num_width = len(str(len(body)))\n for line_number, line in enumerate(body, 1): # Start counting from 1\n print_colored_text(f'[yellow]{line_number:{num_width}}:[cyan] {line}') # Adjust the format as needed\n print_colored_text(\"[yellow]====================\")\n\n\n def detect_language(self, code):\n try:\n lexer = lexers.guess_lexer(code)\n return lexer.name\n except lexers.ClassNotFound:\n return None\n \n async def execute_python_script(self, python_section, filename):\n lines = python_section.split('\\n')\n if len(lines) == 1:\n # Single-line script, execute directly\n script = lines[0]\n # script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n output = await self.run_python_script(script)\n return output\n else:\n # Multi-line script, create a python file\n python_filename = f'{self.temp_file}.py'\n if filename:\n # Use 
commented out filename\n check_filename = filename\n \n if (is_valid_filename(check_filename)):\n python_filename = filename\n\n script = '\\n'.join(lines)\n script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n\n with open(python_filename, 'w') as python_file:\n python_file.write(script)\n\n self.show_file(\"Python File\", script.split('\\n'))\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this Python script? (yes/no): \")\n if user_confirmation.lower() != 'yes':\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return CommandResult(\"\", \"Run python file Canceled.\")\n \n output = await self.run_python_script(python_filename)\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return output\n \n async def run_python_script(self, script):\n # If the script is a file, use 'python filename.py' to execute\n if script.endswith('.py'):\n command = f'python -u {script}'\n else:\n command = f'python -u -c \"{script}\"'\n result = await self.run_command(command)\n return CommandResult(result.out, result.err)\n \n def extract_script_command(self, script_type, text):\n match = re.search(rf'```{script_type}(.*?)```', text, re.DOTALL)\n if match:\n shell_section = match.group(1).strip()\n else:\n logging.error(f\"No {script_type} section found\")\n shell_section = None\n\n return shell_section\n\n \n \n\n async def execute_shell_section(self, shell_section, filename):\n\n logging.info(f\"Executing Shell Section : {shell_section}\")\n\n shell_section.strip()\n\n lines = shell_section.split('\\n')\n ret_value = CommandResult(\"\", \"\")\n \n if len(lines) == 1:\n # Single-line command, execute directly\n command = lines[0]\n\n ret_value = await self.run_command(command)\n logging.error(f\"Execute Shell Directory Line Strip: {ret_value}\")\n\n else:\n # Multi-line command, create a batch file\n batch_filename = f'{self.temp_file}.bat'\n if lines[0].startswith('REM '):\n # Use commented out filename\n batch_filename = lines[0][4:].strip()\n # lines = lines[1:] # Remove the filename line\n\n logging.info(f\"batch_filename : {batch_filename}\")\n with open(batch_filename, 'w') as batch_file:\n batch_file.write('\\n'.join(lines))\n self.show_file(\"Batch File\", lines)\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this batch file? 
(yes/no): \")\n logging.info(f\"user_confirmation : {user_confirmation}\")\n if user_confirmation.lower() != 'yes':\n return CommandResult(\"\", \"Run batch file Canceled.\")\n ret_value = await self.run_command(batch_filename)\n \n logging.info(f\"command output : out: {ret_value.out}, err: {ret_value.err}\")\n if batch_filename == f'{self.temp_file}.bat':\n os.remove(batch_filename) # Remove temporary batch file\n logging.info(f\"removing : {batch_filename}\")\n\n return ret_value\n \n def create_process_group(self):\n # Create a new process group\n process_group_id = os.set_handle_inheritance(0, 1)\n return process_group_id\n\n async def run_command(self, command):\n command += \" && cd\"\n logging.info(f\"run command : {command}\")\n\n stdout, stderr = await self.command_runner.run(command)\n\n \n\n if stderr == \"\":\n lines = stdout.strip().split(\"\\n\")\n if lines:\n new_dir = lines[-1] # Assuming the last line of output contains the new working directory\n if os.path.isdir(new_dir):\n os.chdir(new_dir) # Change to the new working directory in your parent process\n # Remove the last line containing the new directory from the output\n lines = lines[:-1]\n stdout = '\\n'.join(lines)\n else:\n logging.error(f\"Invalid directory: {new_dir}\")\n else:\n logging.error(\"No output to determine the new working directory\")\n\n if stdout.find(\"Traceback (most recent call last):\") > -1:\n stderr = stdout\n stdout = command\n else:\n stderr = f\"Command : {command}, Error: {stderr}\"\n\n logging.info(f\"run return : out: {stdout}, err: {stderr}\")\n\n ret_val = CommandResult(stdout, stderr)\n return ret_val\n \n \n def format_for_display(self, input, output):\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {input}\\nOutput: {output}]\\n\"\n self.display_output(output)\n\n\n def shrink_file_data(self, file_data, target_tokens):\n # Get the current token count of file_data\n current_tokens = get_token_count(file_data)\n\n if current_tokens > target_tokens:\n # Estimate the number of characters to keep based on the average token length\n average_token_length = len(file_data) / current_tokens\n chars_to_keep = int(target_tokens * average_token_length)\n \n # Only keep the last part of file_data\n truncated_data = file_data[-chars_to_keep:]\n return truncated_data\n\n # If the file_data is already within the limit, return it as is\n return file_data\n\n\n def find_relevant_data(file_data, target_tokens):\n # Your logic here to find relevant information within the token count\n return file_data[:target_tokens]\n\n def expand_directories(self, file_paths, exclusions):\n new_file_list = []\n for file_path in file_paths:\n if os.path.isdir(file_path):\n # If the path is a directory, ask the user whether to include its files\n user_decision = input(f\"The path '{file_path}' is a directory. Do you want to add all files in this directory? 
(y/n): \")\n if user_decision.lower() == 'y':\n # If yes, walk through the directory and add all files\n for root, dirs, files in os.walk(file_path):\n # Remove excluded directories so os.walk doesn't traverse them\n dirs[:] = [d for d in dirs if d not in exclusions]\n for name in files:\n if name not in exclusions:\n new_file_list.append(os.path.join(root, name))\n else:\n # If no, inform the user that the directory is being skipped\n print_colored_text(f\"[blue]Skipping directory '{file_path}'.\")\n else:\n # If the path is a file, just add it to the list\n if os.path.basename(file_path) not in exclusions:\n new_file_list.append(file_path)\n return new_file_list\n\n\n def string_sizer(self, data, context, length=1024, use_vector=True):\n set_data = data.strip()\n token_count = get_token_count(set_data)\n print(f\"token_count = {token_count}\")\n if token_count > length:\n if use_vector:\n relevant_segments = self.vector_db.search_similar_conversations(context, top_n=length)\n # relevant_segments = find_relevant_file_segments(\n # history_text= context,\n # file_data=set_data,\n # window_size=length, # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n # set_data = '\\n'.join([f\"[{item[0]}, {item[1]}, {item[2]}]\" for item in relevant_segments])\n\n set_data = '/n.../n'.join(relevant_segments)\n else:\n set_data = trim_to_right_token_count(set_data, len)\n \n data_tokens = get_token_count(set_data)\n logging.info(f\"Translate to Command History Token Count : {data_tokens}\")\n return data_tokens, set_data\n\n async def translate_to_command(self, user_input):\n user_command_prompt = self.settings['user_command_prompt']\n send_prompt = self.settings['command_prompt']\n max_llm = (self.llm_len - 80) #80 is used to pad json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(user_input)\n \n history_tokens, command_history = self.string_sizer(self.command_history, user_input, self.llm_history_len, self.vector_for_history)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, command_history + \"/n\" + user_input, self.llm_folder_len, self.vector_for_commands)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n set_command_files_data = []\n total_tokens = 0\n\n # Extract file paths and exclusion list from user_input\n file_paths = re.findall(r'file:\\s*(\".*?\"|\\S+)', user_input)\n \n # Remove quotes from file paths, if present\n self.files = [fp.strip('\"') for fp in file_paths]\n for f, file in enumerate(self.files):\n exclusions = file.split(',')\n file_path = exclusions[0]\n\n exclusions.pop(0)\n self.files[f] = file_path\n self.exclusions = exclusions\n self.files = self.expand_directories(self.files, self.exclusions)\n\n # Use the new function to expand directories into file lists\n self.files = self.expand_directories(self.files, self.exclusions)\n\n if len(self.files) > 0:\n total_size = 0\n total_data = \"\"\n files_data = []\n \n for file in self.files:\n file_data_content = read_file(file) # 
Note: Changed to 'file_data_content'\n if len(file_data_content) > 50000: #Cap for NLP = 1000000\n # Prompt the user for a decision\n include_file = input(f\"The file {file} is very large. Do you want to include it? (yes/no): \")\n if include_file.lower() != 'yes' or include_file.lower() != 'y':\n print_colored_text(f\"[yellow]Skipping file: {file}\")\n continue # Skip the rest of the loop and therefore the file\n\n\n file_data = {\n \"file\": file,\n \"file_data\": file_data_content,\n \"file_size\": int(get_file_size(file)),\n \"file_tokens\": get_token_count(file_data_content) # Note: Changed to 'file_data_content'\n }\n \n total_size += file_data[\"file_size\"]\n total_data += file_data[\"file_data\"]\n\n files_data.append(file_data)\n\n # Sort files_data by file_tokens in descending order\n files_data = sorted(files_data, key=lambda x: x['file_tokens'], reverse=True)\n\n remaining_tokens = self.llm_file_len\n remaining_tokens_split = int(remaining_tokens / len(files_data)) + 1\n new_files_data = []\n for f, file in enumerate(files_data):\n if file[\"file_tokens\"] > remaining_tokens_split:\n file[\"fileIndex\"] = f\n file[\"file_tokens\"] = remaining_tokens_split\n new_files_data.append(file)\n else:\n remaining_tokens -= file[\"file_tokens\"]\n div_val = (len(files_data) - (len(files_data) - len(new_files_data)))\n if div_val == 0:\n div_val = 1\n\n remaining_tokens_split = int(remaining_tokens / div_val)\n \n if len(new_files_data) > 0:\n for new_file in new_files_data:\n print_colored_text(f\"[cyan]File {new_file['file']} Trimming\")\n relevant_segments = self.vector_db.search_similar_conversations(new_file['file_data'])\n # relevant_segments = find_relevant_file_segments(\n # history_text=folder_list + \"\\n\" + command_history + \"\\n\"+ user_input,\n # file_data=new_file['file_data'],\n # window_size=new_file['file_tokens'], # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n new_file['file_data'] = '/n.../n'.join(relevant_segments)\n file_data_content = new_file['file_data']\n \n new_file['file_tokens'] = get_token_count(file_data_content)\n\n files_data[new_file[\"fileIndex\"]] = new_file\n\n total_tokens = 0\n for file_data in files_data:\n total_tokens += file_data[\"file_tokens\"]\n\n # Check if the file_data is binary and encode it with base64 if so\n try:\n # This will work if 'file_data' is text\n encoded_data = json.dumps(file_data['file_data'])\n except TypeError:\n # If 'file_data' is binary, encode it with base64\n encoded_data = base64.b64encode(file_data['file_data']).decode('utf-8')\n\n add_command_files_data = {\n \"file:\": file_data[\"file\"],\n \"data:\": encoded_data\n }\n\n set_command_files_data.append(add_command_files_data)\n \n\n command_files_data = json.dumps(set_command_files_data)\n logging.info(f\"Translate to Command File Token Count : {total_tokens}\")\n\n max_llm -= total_tokens\n\n commands = map_possible_commands()\n command_tokens, commands = self.string_sizer(commands, command_files_data + \"\\n\" + folder_list + \"\\n\" + command_history + \"\\n\"+ user_input, max_llm, self.vector_for_commands)\n \n command_tokens = get_token_count(commands)\n logging.info(f\"Translate to Command Commands Token Count : {command_tokens}\")\n \n logging.info(f\"Translate to Command : {user_input}\")\n\n kwargs = {\n 'user_prompt': user_input,\n 'get_os_name': get_os_name(),\n 'commands': commands,\n 
'command_history': command_history,\n 'command_files_data': command_files_data,\n 'current_folders_data': folder_list\n }\n user_command_prompt = replace_placeholders(user_command_prompt, **kwargs)\n system_command_prompt = replace_placeholders(send_prompt, **kwargs)\n\n user_tokens = get_token_count(user_command_prompt)\n system_tokens = get_token_count(system_command_prompt)\n logging.info(f\"Translate to Command User Token Count : {user_tokens}\")\n logging.info(f\"Translate to Command System Token Count : {system_tokens}\")\n\n logging.info(f\"Translate to Command use System Prompt : {system_command_prompt}\")\n logging.info(f\"Translate to Command use User Prompt : {user_command_prompt}\")\n # command_output = self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # loop = asyncio.get_event_loop()\n # command_output = await loop.run_in_executor(None, lambda: self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\"))))\n command_output = await self.llm.async_ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # save_history_data(user_command_prompt, f\"User : {system_command_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {system_command_prompt}\\n User : {user_command_prompt}\")\n logging.info(f\"Translate to Command return Response : {command_output}\")\n\n display_content = \"\"\n display_error = None\n try:\n if not isinstance(command_output, str):\n # Convert non-string command_output to a JSON-formatted string\n command_output_obj = {\n \"type\": \"Unknown\",\n \"Content\": f\"{command_output}\"\n }\n try:\n command_output_obj = json.loads(command_output)\n except json.JSONDecodeError as e:\n # Handle JSON decoding error if it occurs\n # You might want to log this error or handle it as per your application's needs\n command_output_obj = {\"type\": \"Error\", \"content\": str(e)}\n\n\n logging.info(f\"Translate return Response : {command_output}\")\n type = command_output_obj[\"type\"]\n content = command_output_obj.get(\"content\", None)\n err = content.get(\"error\", None)\n\n if not err:\n if type == \"command_execution\":\n command = content[\"command\"]\n if len(command) > 6 and command[:6] == \"python\":\n while True:\n run_as_mod = capture_styled_input(\"[yellow]Do you want to add our compatibility code? 
(yes/no/exit) :\")\n run_as_code = False\n cancel_run = False\n if run_as_mod == \"yes\" or run_as_mod == \"y\":\n run_as_code = True\n break\n elif run_as_mod == \"no\" or run_as_mod == \"n\":\n run_as_code = False\n break\n elif run_as_mod == \"exit\":\n cancel_run = True\n break\n else:\n print_colored_text(\"[red]Invalid Input!\")\n\n if not cancel_run:\n if run_as_code:\n # Extract the Python script or module name from the command\n command_parts = command_output.split()\n script_name = None\n for i, part in enumerate(command_parts):\n if part.endswith(\".py\"):\n script_name = part\n break\n elif part == \"-m\" and i < len(command_parts) - 1:\n script_name = command_parts[i + 1] + \".py\" # Assuming the module name is a Python file name\n break\n\n # Open and read the script if the name is found\n if script_name:\n try:\n with open(script_name, 'r') as file:\n python_code = file.read()\n\n\n # Now, python_code contains the content of the Python file\n # You can now pass this code to execute_python_script function\n display_content = await self.execute_python_script(python_code)\n\n except FileNotFoundError:\n print_colored_text(f\"[red]Error: The file {script_name} was not found.\")\n logging.info(f\"Translate Command Error: The file {script_name} was not found.\")\n except Exception as e:\n print_colored_text(f\"[red]Error: An error occurred while reading the file {script_name}: {e}\")\n logging.info(f\"Translate Command Error: An error occurred while reading the file {script_name}: {e}\")\n else:\n print_colored_text(\"[red]Error: No Python script name could be extracted from the command.\")\n logging.info(f\"Translate Command Error: No Python script name could be extracted from the command.\")\n else:\n success, command_output = await self.execute_command(command_output)\n if not success:\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {command_output}\")\n else:\n logging.info(f\"Translate Command Canceled : {command_output}\")\n else:\n success, command_output = await self.execute_command(command)\n if not success and command_output.err.strip() != \"\":\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {display_content}\")\n pass\n elif type == \"script_creation\":\n script_text = content['script']\n script_type = content['script_type']\n script_filename = content.get('script_filename', None)\n\n if script_type == \"shell\" or script_type == \"batch\" or script_type == \"bash\":\n display_content = await self.execute_shell_section(script_text, script_filename)\n elif script_type == \"python\":\n display_content = await self.execute_python_script(script_text, script_filename)\n else:\n display_content = CommandResult(script_text, f\"Invalid Script Type : {script_type}\")\n\n if command_output.err != \"\":\n print_colored_text(f\"[red]Shell Error: {command_output.err} with {command_output.out}\")\n display_content = command_output.err\n else: \n display_content = command_output.out\n\n logging.info(f\"Translate Shell Execute : {command_output}\")\n elif type == \"response_formatting\":\n display_content = content[\"text\"]\n elif type == \"error_handling\":\n display_content = content[\"type\"]\n display_error = err\n else:\n display_content = command_output\n display_error = f\"Invalid 
command type '{type}'.\"\n else:\n display_content = command_output\n display_error = err\n logging.info(f\"Translate to Command Object Error : {err}, command_output= {command_output}\")\n\n\n except Exception as e:\n display_content = command_output\n display_error = e\n logging.info(f\"Translate to Command Object Error : {e}, command_output= {command_output}\")\n\n logging.info(f\"Translate to Command Display Content : {display_content}\")\n\n if display_error:\n return display_error\n \n return display_content\n \n def check_script(self, code_type, text):\n command_output = text\n if f'```{code_type}' in text:\n command_output = self.extract_script_command(code_type, text)\n logging.info(f\"Translate '{code_type}' Code : {text}\")\n\n return command_output\n\n async def execute_command(self, command):\n try:\n logging.info(f\"Execute Command : {command}\")\n result = await self.run_command(command)\n if result.err:\n logging.info(f\"Execute Error : {result.err}\")\n return False, result\n \n logging.info(f\"Execute Output : {result.out}\")\n\n return True, result\n except Exception as e:\n return False, CommandResult(\"\", str(e))\n\n def translate_output(self, output, is_internal=False):\n logging.info(f\"Translate Output : {output}\")\n send_prompt = self.settings['display_prompt']\n\n total_tokens = self.llm_output_size - (get_token_count(send_prompt) + get_token_count(output) + 80)\n\n set_command_history = self.command_history\n token_count = get_token_count(set_command_history)\n\n if token_count > total_tokens:\n set_command_history = trim_to_right_token_count(set_command_history, total_tokens)\n\n max_llm = (self.llm_len - 80) #80 is used to padd json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(output)\n \n history_tokens, command_history = self.string_sizer(self.command_history, output, self.llm_history_len)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, self.command_history + \"/n\" + output, self.llm_folder_len)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n kwargs = {\n 'get_os_name': get_os_name(),\n 'command_history': set_command_history,\n 'internal_script': str(is_internal)\n }\n send_prompt = replace_placeholders(send_prompt, **kwargs)\n\n logging.info(f\"Translate Output Display System Prompt : {send_prompt}\")\n logging.info(f\"Translate Output Display User Prompt : {output}\")\n display_output = self.llm.ask(send_prompt, output, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"text\")\n # save_history_data(output, f\"Assistant : {send_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {send_prompt}\\n User : {output}\")\n\n logging.info(f\"Translate Output Display Response : {display_output}\")\n return display_output\n\n def display_output(self, output):\n logging.info(f\"Display Output : {output}\")\n print_colored_text(output)\n\n def display_about(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak\\n======================================================\\n[white]AI powered Console 
Input\\nVisit: https://github.com/TheCompAce/ShellSpeak\\nDonate: @BradfordBrooks79 on Venmo\\n\\n[grey]Type 'help' for Help.\\n[yellow]======================================================\\n\")\n\n def display_help(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak Help\\n======================================================\\n[white]Type:\\n'exit': to close ShellSpeak\\n'user: /command/': pass a raw command to execute then reply threw the AI\\n'file: /filepath/': adds file data to the command prompt. (use can send a folder path, using ',' to exclude folders and files.)\\n'clm': Clear command Memory\\n'rset': Reloads the settings file (this happens on every loading of the prompt.)\\n'about': Shows the About Information\\n'help': Shows this Help information.\\n[yellow]======================================================\\n\")\n\n async def run(self):\n self.display_about()\n while True:\n self.settings = load_settings(self.settingsRoot)\n self.files = []\n\n user_input = self.capture_input()\n if user_input.lower() == 'exit':\n break\n elif user_input.lower() == 'about':\n self.display_about()\n elif user_input.lower() == 'help':\n self.display_help()\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'clm':\n self.command_history = \"\"\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: Command History cleared.\\n\"\n self.display_output(f\"Command Memory (History) Cleared.\")\n else:\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if user_input.lower().startswith('user: '):\n # Bypass AI translation and send raw command to the OS\n raw_command = user_input[6:] # Extract the command part from user_input\n try:\n result = await self.run_command(raw_command)\n except Exception as e:\n translated_command = e\n translated_output = self.translate_output(result.out)\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {result.out} Error: {result.err}]\\n\"\n # self.display_output(f\"Output:\\n{result.out}\\nError:\\n{result.err}\")\n self.display_output(translated_output)\n else:\n # Continue with AI translation for the command\n try:\n translated_command = await self.translate_to_command(user_input)\n except Exception as e:\n translated_command = {\n \"err\" : \"Invalid user_input!\",\n \"out\": e\n }\n # if translated_command.err == \"\":\n # translated_output = self.translate_output(translated_command)\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: {translated_output}\\n\"\n # self.display_output(translated_output)\n #else:\n user_input = redact_json_values(user_input, [\"run_command_list\", \"command_files\"])\n\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {translated_command}]\\n\"\n if not isinstance(translated_command, str):\n translated_command = str(translated_command) # Convert non-string output to string\n translated_output = self.translate_output(translated_command)\n self.display_output(translated_output)" }, { "identifier": "load_settings", "path": "modules/utils.py", "snippet": "def load_settings(filepath):\n try:\n with open(os.path.join(filepath, \"settings.json\"), 'r') as f:\n settings = json.load(f)\n chk_file = os.path.join(filepath, settings['command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 
'r') as f:\n settings['command_prompt'] = f.read()\n \n chk_file = os.path.join(filepath, settings['display_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['display_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['user_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['user_command_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['python_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['python_command_prompt'] = f.read()\n\n return settings\n except FileNotFoundError:\n return {}" } ]
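A side note on the VectorDatabase snippet above: it persists averaged Word2Vec vectors as raw bytes in SQLite (vector.tobytes()) and rebuilds them with np.frombuffer before running cosine similarity. The standalone sketch below reproduces only that BLOB round-trip; the in-memory database and the random float32 vector are illustrative assumptions, not code from the repository.

import sqlite3
import numpy as np

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE vector_data (id INTEGER PRIMARY KEY, vector BLOB)")

vec = np.random.rand(100).astype(np.float32)          # stand-in for an averaged word vector
conn.execute("INSERT INTO vector_data (vector) VALUES (?)", (vec.tobytes(),))

blob = conn.execute("SELECT vector FROM vector_data").fetchone()[0]
restored = np.frombuffer(blob, dtype=np.float32)      # same dtype assumed when reading back
assert np.allclose(vec, restored)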
import json
import os
import sys
import asyncio
from modules.vectorDatabase import VectorDatabase
from datetime import datetime
from modules.menus.setup_menu import save_settings, setup_menu
from modules.shellSpeak import ShellSpeak
from modules.utils import load_settings
11,491
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update

def run_async_function(func, *args):
    asyncio.run(func(*args))

async def start_shell_speak(settings, base_path, vector_db):
    await main_start(settings, base_path, vector_db)

async def main_start(settings, base_path, vector_db):
    # Initialize VectorDatabase here if needed globally
    shellSpeak = ShellSpeak(settings, base_path, vector_db)
    await shellSpeak.run()

def main():
    base_path = os.path.abspath(".")
    settings = load_settings(base_path)

    # FAISS Index check and build prompt
    if settings.get('use_indexing', False):
        system_folder_path = os.path.join(base_path, 'system')
        # history_json_path = os.path.join(system_folder_path, 'history.json')
        vector_db_path = os.path.join(system_folder_path, 'vector')
        vector_db = VectorDatabase(path=settings.get('vector_db_path', system_folder_path), name=settings.get('vector_db_name', vector_db_path))

        if not os.path.exists(system_folder_path):
            os.makedirs(system_folder_path)

        # Check if 'system' folder and 'history.json' exist
        # if not os.path.exists(system_folder_path) or not os.path.exists(history_json_path):
        #     settings['use_indexing'] = False

        if vector_db.needs_index_update():
            user_decision = input("A new index needs to be built. Do you want to build it now? (yes/no): ")
            if user_decision.lower() in ['yes', 'y']:
                print("Building index... (Grab a Coffee.)")
                vector_db.train_untrained_responses()
                print("Index built and saved successfully.")
                settings['last_build_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                save_settings(settings, os.path.join(base_path, 'settings.json'))
            else:
                print("Skipping index building.")

    # Check for command-line arguments
    if len(sys.argv) > 1 and sys.argv[1] == '/start':
        run_async_function(start_shell_speak, settings, base_path, vector_db)
        return

    # Display menu
    while True:
        print("\nMenu:")
        print("1. Setup")
        print("2. Run")
        print("3. Exit")
        print("-----------------------------------------------------------------")
        print("(You can also start the script with /start to Start automaticly.)")
        print("-----------------------------------------------------------------")
        choice = input("Choose an option: ")

        if choice == '1':
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update def run_async_function(func, *args): asyncio.run(func(*args)) async def start_shell_speak(settings, base_path, vector_db): await main_start(settings, base_path, vector_db) async def main_start(settings, base_path, vector_db): # Initialize VectorDatabase here if needed globally shellSpeak = ShellSpeak(settings, base_path, vector_db) await shellSpeak.run() def main(): base_path = os.path.abspath(".") settings = load_settings(base_path) # FAISS Index check and build prompt if settings.get('use_indexing', False): system_folder_path = os.path.join(base_path, 'system') # history_json_path = os.path.join(system_folder_path, 'history.json') vector_db_path = os.path.join(system_folder_path, 'vector') vector_db = VectorDatabase(path=settings.get('vector_db_path', system_folder_path), name=settings.get('vector_db_name', vector_db_path)) if not os.path.exists(system_folder_path): os.makedirs(system_folder_path) # Check if 'system' folder and 'history.json' exist # if not os.path.exists(system_folder_path) or not os.path.exists(history_json_path): # settings['use_indexing'] = False if vector_db.needs_index_update(): user_decision = input("A new index needs to be built. Do you want to build it now? (yes/no): ") if user_decision.lower() in ['yes', 'y']: print("Building index... (Grab a Coffee.)") vector_db.train_untrained_responses() print("Index built and saved successfully.") settings['last_build_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') save_settings(settings, os.path.join(base_path, 'settings.json')) else: print("Skipping index building.") # Check for command-line arguments if len(sys.argv) > 1 and sys.argv[1] == '/start': run_async_function(start_shell_speak, settings, base_path, vector_db) return # Display menu while True: print("\nMenu:") print("1. Setup") print("2. Run") print("3. Exit") print("-----------------------------------------------------------------") print("(You can also start the script with /start to Start automaticly.)") print("-----------------------------------------------------------------") choice = input("Choose an option: ") if choice == '1':
setup_menu()
1
2023-10-31 23:35:19+00:00
16k
qym7/SparseDiff
sparse_diffusion/diffusion_model_sparse.py
[ { "identifier": "utils", "path": "sparse_diffusion/utils.py", "snippet": "def setup_wandb(cfg):\ndef create_folders(args):\ndef to_dense(x, edge_index, edge_attr, batch, charge):\ndef to_dense_node(x, batch):\ndef to_dense_edge(edge_index, edge_attr, batch, max_num_nodes):\ndef encode_no_edge(E):\ndef to_sparse(X, E, y, node_mask, charge=None):\n def __init__(self, X, E, y, charge=None, t_int=None, t=None, node_mask=None):\n def device_as(self, x: torch.Tensor):\n def type_as(self, x: torch.Tensor):\n def mask(self, node_mask=None, collapse=False):\n def collapse(self, collapse_charge=None):\n def __repr__(self):\n def copy(self):\n def __init__(\n self, node, edge_index, edge_attr, y, ptr=None, batch=None, charge=None\n ):\n def type_as(self, x: torch.Tensor):\n def to_device(self, device: str):\n def coalesce(self):\n def symmetry(self):\n def collapse(self, collapse_charge=None):\n def __init__(self, keep_chain):\n def append(self, data):\ndef delete_repeated_twice_edges(edge_index, edge_attr): \ndef to_undirected(edge_index, edge_attr=None):\ndef undirected_to_directed(edge_index, edge_attr=None):\ndef ptr_to_node_mask(ptr, batch, n_node):\ndef concat_sparse_graphs(graphs):\ndef split_samples(samples, start_idx, end_idx):\ndef densify_noisy_data(sparse_noisy_data):\n E = to_dense_edge(edge_index, edge_attr, batch, max_num_nodes)\n E = to_dense_adj(\n edge_index=edge_index,\n batch=batch,\n edge_attr=edge_attr,\n max_num_nodes=max_num_nodes,\n )\n E = encode_no_edge(E)\n E[:, :, :, 0] = first_elt\nclass PlaceHolder:\nclass SparsePlaceHolder:\nclass SparseChainPlaceHolder:" }, { "identifier": "diffusion_utils", "path": "sparse_diffusion/diffusion/diffusion_utils.py", "snippet": "def sum_except_batch(x):\ndef assert_correctly_masked(variable, node_mask):\ndef sample_gaussian(size):\ndef sample_gaussian_with_mask(size, node_mask):\ndef clip_noise_schedule(alphas2, clip_value=0.001):\ndef cosine_beta_schedule(timesteps, s=0.008, raise_to_power: float = 1):\ndef cosine_beta_schedule_discrete(timesteps, s=0.008):\ndef custom_beta_schedule_discrete(timesteps, average_num_nodes=50, s=0.008):\ndef gaussian_KL(q_mu, q_sigma):\ndef cdf_std_gaussian(x):\ndef SNR(gamma):\ndef inflate_batch_array(array, target_shape):\ndef sigma(gamma, target_shape):\ndef alpha(gamma, target_shape):\ndef check_mask_correct(variables, node_mask):\ndef check_tensor_same_size(*args):\ndef sigma_and_alpha_t_given_s(\n gamma_t: torch.Tensor, gamma_s: torch.Tensor, target_size: torch.Size\n):\ndef reverse_tensor(x):\ndef sample_discrete_features(probX, probE, node_mask, prob_charge=None):\ndef sample_discrete_edge_features(probE, node_mask):\ndef sample_discrete_node_features(probX, node_mask):\ndef compute_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_sparse_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_batched_over0_posterior_distribution(X_t, Qt, Qsb, Qtb):\ndef mask_distributions(\n true_X, true_E, pred_X, pred_E, node_mask, true_charge=None, pred_charge=None\n):\ndef posterior_distributions(X, E, X_t, E_t, y_t, Qt, Qsb, Qtb, charge, charge_t):\ndef sample_discrete_feature_noise(limit_dist, node_mask):\ndef sample_sparse_discrete_feature_noise(limit_dist, node_mask):\ndef compute_sparse_batched_over0_posterior_distribution(\n input_data, batch, Qt, Qsb, Qtb\n):\n M = M.flatten(start_dim=1, end_dim=-2).to(\n torch.float32\n ) # (bs, N, d) with N = n or n * n\n U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)\n U_E = 
e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)\n U_X = U_X.type_as(long_mask)\n U_E = U_E.type_as(long_mask)\n U_X = F.one_hot(U_X, num_classes=x_limit.shape[-1]).float()\n U_E = F.one_hot(U_E, num_classes=e_limit.shape[-1]).float()\n U_E = U_E * upper_triangular_mask\n U_E = U_E + torch.transpose(U_E, 1, 2)" }, { "identifier": "get_computational_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def get_computational_graph(\n triu_query_edge_index,\n clean_edge_index,\n clean_edge_attr,\n triu=True,\n):\n \"\"\"\n concat and remove repeated edges of query_edge_index and clean_edge_index\n mask the position of query_edge_index\n in case where query_edge_attr is None, return query_edge_attr as 0\n else, return query_edge_attr for all query_edge_index\n (used in apply noise, when we need to sample the query edge attr)\n \"\"\"\n # get dimension information\n de = clean_edge_attr.shape[-1]\n device = triu_query_edge_index.device\n\n # create default query edge attr\n default_query_edge_attr = torch.zeros((triu_query_edge_index.shape[1], de)).to(\n device\n )\n default_query_edge_attr[:, 0] = 1\n\n # if query_edge_attr is None, use default query edge attr\n if triu:\n # make random edges symmetrical\n query_edge_index, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n _, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n else:\n query_edge_index, default_query_edge_attr = triu_query_edge_index, default_query_edge_attr\n\n # get the computational graph: positive edges + random edges\n comp_edge_index = torch.hstack([clean_edge_index, query_edge_index])\n default_comp_edge_attr = torch.argmax(\n torch.vstack([clean_edge_attr, default_query_edge_attr]), -1\n )\n\n # reduce repeated edges and get the mask\n assert comp_edge_index.dtype == torch.long\n _, min_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"min\"\n )\n\n max_comp_edge_index, max_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"max\"\n )\n query_mask = min_default_edge_attr == 0\n comp_edge_attr = F.one_hot(max_default_edge_attr.long(), num_classes=de).float()\n\n return query_mask, max_comp_edge_index, comp_edge_attr" }, { "identifier": "mask_query_graph_from_comp_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def mask_query_graph_from_comp_graph(\n triu_query_edge_index, edge_index, edge_attr, num_classes\n):\n query_edge_index = utils.to_undirected(triu_query_edge_index)\n # import pdb; pdb.set_trace()\n\n all_edge_index = torch.hstack([edge_index, query_edge_index])\n all_edge_attr = torch.hstack(\n [\n torch.argmax(edge_attr, -1),\n torch.zeros(query_edge_index.shape[1]).to(edge_index.device),\n ]\n )\n\n assert all_edge_index.dtype == torch.long\n _, min_edge_attr = coalesce(all_edge_index, all_edge_attr, reduce=\"min\")\n\n max_edge_index, max_edge_attr = coalesce(\n all_edge_index, all_edge_attr, reduce=\"max\"\n )\n\n return (\n min_edge_attr == 0,\n F.one_hot(max_edge_attr.long(), num_classes=num_classes),\n max_edge_index,\n )" }, { "identifier": "sample_non_existing_edge_attr", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def sample_non_existing_edge_attr(query_edges_dist_batch, num_edges_to_sample):\n device = query_edges_dist_batch.device\n max_edges_to_sample = int(num_edges_to_sample.max())\n\n if max_edges_to_sample == 0:\n return 
torch.tensor([]).to(device)\n\n query_mask = (\n torch.ones((len(num_edges_to_sample), max_edges_to_sample))\n .cumsum(-1)\n .to(device)\n )\n query_mask[\n query_mask > num_edges_to_sample.unsqueeze(-1).repeat(1, max_edges_to_sample)\n ] = 0\n query_mask[query_mask > 0] = 1\n query_edge_attr = (\n torch.multinomial(query_edges_dist_batch, max_edges_to_sample, replacement=True)\n + 1\n )\n query_edge_attr = query_edge_attr.flatten()[query_mask.flatten().bool()]\n\n return query_edge_attr" }, { "identifier": "condensed_to_matrix_index_batch", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def condensed_to_matrix_index_batch(condensed_index, num_nodes, edge_batch, ptr):\n \"\"\"From https://stackoverflow.com/questions/5323818/condensed-matrix-function-to-find-pairs.\n condensed_index: (E) example: [0, 1, 0, 2] where [0, 1] are edges for graph0 and [0,2] edges for graph 1\n num_nodes: (bs)\n edge_batch: (E): tells to which graph each edge belongs\n ptr: (bs+1): contains the offset for the number of nodes in each graph.\n \"\"\"\n bb = -2 * num_nodes[edge_batch] + 1\n\n # Edge ptr adds an offset of n (n-1) / 2 to each edge index\n ptr_condensed_index = condensed_index\n ii = torch.div(\n (-bb - torch.sqrt(bb**2 - 8 * ptr_condensed_index)), 2, rounding_mode=\"floor\"\n )\n jj = (\n ptr_condensed_index\n + torch.div(ii * (bb + ii + 2), 2, rounding_mode=\"floor\")\n + 1\n )\n return torch.vstack((ii.long(), jj.long())) + ptr[edge_batch]" }, { "identifier": "sample_query_edges", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_query_edges(\n num_nodes_per_graph: Tensor, edge_proportion=None, num_edges_to_sample=None\n):\n \"\"\"Sample edge_proportion % of edges in each graph\n num_nodes_per_graph: (bs): tensor of int.\n Return: edge_index, batch\n \"\"\"\n assert num_nodes_per_graph.dtype == torch.long\n # num_nodes could be 1 in QM9\n assert torch.all(num_nodes_per_graph >= 1), num_nodes_per_graph\n\n batch_size = len(num_nodes_per_graph)\n device = num_nodes_per_graph.device\n\n n = num_nodes_per_graph\n max_condensed_value = (n * (n - 1) / 2).long()\n if num_edges_to_sample is None and edge_proportion is not None:\n assert 0 < edge_proportion <= 1, edge_proportion\n num_edges_to_sample = torch.ceil(edge_proportion * max_condensed_value).long()\n elif num_edges_to_sample is not None:\n assert num_edges_to_sample.dtype == torch.long\n else:\n raise ValueError(\n \"Either edge_proportion or num_edges_to_sample should be provided\"\n )\n\n condensed_index, edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample\n )\n\n if batch_size == 1:\n edge_index = condensed_to_matrix_index(condensed_index, num_nodes=n[0])\n return edge_index, torch.zeros(n, dtype=torch.long, device=device)\n\n if len(torch.unique(num_nodes_per_graph)) == 1:\n # Case of several graphs of the same size\n # Add the offset to the edge_index\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n return edge_index, torch.arange(batch_size, device=device).repeat_interleave(n)\n\n # Most general case: graphs of varying sizes\n # condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n 
(torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n # Get the batch information\n batch = torch.arange(batch_size, device=device).repeat_interleave(\n num_nodes_per_graph\n )\n return edge_index, batch" }, { "identifier": "sample_non_existing_edges_batched", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_non_existing_edges_batched(\n num_edges_to_sample, existing_edge_index, num_nodes, batch\n):\n \"\"\"Sample non-existing edges from a complete graph.\n num_edges_to_sample: (bs) long\n existing_edge_index: (2, E)\n num_nodes: (bs) long\n batch: (N) long\n existing_edge_index only contains edges that exist in the top part of triangle matrix\n \"\"\"\n device = existing_edge_index.device\n unit_graph_mask = num_nodes == 1\n unit_graph_mask_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.bool), unit_graph_mask[:-1])\n )\n\n # Compute the number of existing and non-existing edges.\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n # Count existing edges using global pooling. In case a graph has no edge, global_add_pool\n # May return something of the wrong length. To avoid this, add a 0 for each graph\n # TODO: check if it can be simplified using the size argument of global add pool\n # full_edge_count = torch.hstack((torch.ones(existing_edge_index.shape[1], device=device),\n # torch.zeros(batch.max()+1, device=device))) # (ne+bs)\n # full_edge_batch = torch.hstack((batch[existing_edge_index[0]],\n # torch.arange(batch.max()+1, device=device))) # (ne+bs)\n # num_edges_existing = pool.global_add_pool(x=full_edge_count, batch=full_edge_batch).long()\n num_edges_existing = pool.global_add_pool(\n x=torch.ones(existing_edge_index.shape[1], device=device),\n batch=batch[existing_edge_index[0]],\n size=len(num_edges_to_sample),\n ).long()\n num_non_existing_edges = num_edges_total - num_edges_existing\n assert (num_edges_to_sample <= num_non_existing_edges).all(), (\n num_edges_to_sample,\n num_non_existing_edges,\n )\n\n # Sample non-existing edge indices without considering existing edges.\n # print(\"Num edges non existing\", num_non_existing_edges)\n # multinomial and not randint because we want to sample without replacement\n sampled_indices, sampled_edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value=num_non_existing_edges,\n num_edges_to_sample=num_edges_to_sample,\n )\n\n # Compute the offset (bs, ) for each graph, where offset -> nbr of nodes, sq_offset -> nbr of edges\n # Go from a matrix problem to a 1d problem, it is easier\n existing_edge_batch = batch[existing_edge_index[0]]\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n sq_offset = torch.cumsum(num_edges_total, dim=0)[:-1] # (bs - 1)\n # Prepend a 0\n sq_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), sq_offset)\n ) # (bs)\n\n offset = torch.cumsum(num_nodes, dim=0)[\n :-1\n ] # (bs - 1) # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n # existing_indices (E, ) is of form [0 1 2 3 4 0 2 3 4]\n rescaled_edge_index = (\n existing_edge_index - offset[existing_edge_batch]\n ) # of form [0 1 2 3 4 0 2 3 4]\n existing_indices = matrix_to_condensed_index_batch(\n rescaled_edge_index, num_nodes=num_nodes, edge_batch=existing_edge_batch\n )\n\n # Add offset to the sampled indices\n # Example of sampled condensed: 
[0 3 1 0 2]\n epsilon = 0.1\n sampled_indices_offset = sq_offset[sampled_edge_batch] # (E_sample, )\n # print(\"sampled indices\", sampled_indices)\n # print(\"sampled edge batch\", sampled_edge_batch)\n samp_ind_w_offset = sampled_indices + sampled_indices_offset\n samp_ind_w_offset = torch.sort(samp_ind_w_offset)[\n 0\n ] # E.g. [0 1 3 6 8], where [0 1 3] belong to a graph of 4 nodes, [6 8] to a graph of 3 nodes\n # print(\"Sampled indices with offset\", samp_ind_w_offset)\n # add small value to create an order later in the sort\n samp_ind_w_offset = samp_ind_w_offset + epsilon\n\n # Add virtual edges to the existing edges to mark the beginning of each graph, for batch processing\n # After adding epsilon, sqrt_ptr is smaller than all edges of the next graph, and bigger than all edges of the current graph\n # * when there exists graphs with size 1, there might be identical values in sq_offset, also in virtual nodes\n existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n virtual_nodes = (\n sq_offset - epsilon\n ) # Introduce virtual nodes that will be used later to split graphs\n # add different offset for graphs of size 1 to separate them and their following graphs\n virtual_nodes[unit_graph_mask] = virtual_nodes[unit_graph_mask] - 0.1\n existing_ind_w_offset = torch.cat((existing_ind_w_offset, virtual_nodes))\n existing_ind_w_offset, existing_condensed_offset_argsort = torch.sort(\n existing_ind_w_offset\n )\n # print(\"Existing condensed indices with offset\", existing_ind_w_offset)\n virtual_existing_mask = torch.cat(\n (\n torch.zeros(len(existing_indices), dtype=torch.long, device=device),\n torch.ones(len(sq_offset), dtype=torch.long, device=device),\n )\n )\n virtual_existing_mask = virtual_existing_mask[\n existing_condensed_offset_argsort\n ] # [1 0 0 0 1 0 0]\n # print('Virtual nodes mask', virtual_existing_mask)\n\n # Compute the mask of free edges\n # When there exists graphs with size 1, free spots might be negative, which means that\n # existing condensed indices have same neighbor value\n free_spots = (\n torch.diff(existing_ind_w_offset, prepend=torch.tensor([-1]).to(device)) - 1\n ) # [-0.1, 0, 2, 9, 9.9, 18, 25]\n free_spots = torch.ceil(free_spots).long() # [0, 0, 1, 6, 0, 8, 6]\n # print(\"Free spots\", free_spots)\n # Map these values to index\n cumsum = torch.cumsum(free_spots, dim=0).long() # [1 2 3 4 5 6 7]\n cumsum_batch = (\n torch.cumsum(virtual_existing_mask, dim=0).long() - 1\n ) # [1 1 1 1 2 2 2] - 1\n # delete the offset of free spots to cumsum\n cumsum_offset = cumsum[virtual_existing_mask.bool()][cumsum_batch]\n # print(\"Cumsum offset\", cumsum_offset)\n # print(\"Cumsum before removing offset\", cumsum)\n cumsum = cumsum - cumsum_offset # [0 2 5 0 2 5]\n # add the offset of edge number to cumsum\n cumsum = cumsum + sq_offset[cumsum_batch] # [0 2 5 6 8 11]\n # print(\"Cumsum\", cumsum)\n # Cumsum now contains the number of free spots at the left -- it is computed separetely for each graph\n # An offset is added on the result\n\n # Add virtual edges to the sampled edges to mark the end of each graph\n num_sampled_edges = len(sampled_indices)\n num_virtual_nodes = len(sq_offset)\n num_free_spots_indices = len(cumsum)\n\n # Group the different vectors together: the existing edges, the virtual nodes and the free spots\n grouped = torch.cat((samp_ind_w_offset, virtual_nodes, cumsum))\n # print(\"grouped\", grouped)\n sorted, argsort = torch.sort(grouped)\n # print(\"sorted\", sorted)\n # Create the masks corresponding to these 3 types of 
objects\n num_total = num_sampled_edges + num_virtual_nodes + num_free_spots_indices\n # mask is created for virtual nodes, in order to reduce the offset for cumsum\n virtual_sampled_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n virtual_sampled_mask[\n num_sampled_edges : num_sampled_edges + num_virtual_nodes\n ] = True\n virtual_sampled_mask = virtual_sampled_mask[argsort]\n\n free_spots_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n free_spots_ind_mask[-num_free_spots_indices:] = True\n free_spots_ind_mask = free_spots_ind_mask[argsort]\n\n sampled_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n sampled_ind_mask[:num_sampled_edges] = True\n sampled_ind_mask = sampled_ind_mask[argsort]\n\n # to_shift tells by how much to shift sampled and virtual edges\n to_shift = torch.cumsum(free_spots_ind_mask, dim=0) # - sampled_edge_batch\n # print(\"to_shift\", to_shift)\n new_indices = sorted + to_shift\n # remove epsilon added to sampled edges\n new_indices = new_indices[sampled_ind_mask] - epsilon\n # remove cumsum_offset to unify the indices of different graphs from cumsum_mask\n # 1 is added to compensate the fact that cumsum is computed with virtual nodes\n cumsum_offset = to_shift[virtual_sampled_mask.bool()][sampled_edge_batch] + 1\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] = (\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] + 1\n )\n # print(\"Cumsum offset\", cumsum_offset)\n # remove sq_offset contained by sorted\n new_indices = new_indices - cumsum_offset - sq_offset[sampled_edge_batch]\n # print(\"New indices long\", new_indices)\n new_indices = new_indices.round()\n # print('Existing edge indices', existing_indices)\n # Convert to matrix index.\n new_edge_index = condensed_to_matrix_index_batch(\n condensed_index=new_indices,\n num_nodes=num_nodes,\n edge_batch=sampled_edge_batch,\n ptr=offset,\n )\n\n # # debugging\n # # check if there are repeated edges\n # print('smallest graph size is {}'.format(num_nodes.min()))\n # existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n # samp_ind_w_offset = new_indices + sq_offset[sampled_edge_batch]\n # repeated = existing_ind_w_offset.round().unsqueeze(1) == samp_ind_w_offset.round().unsqueeze(0)\n # repeated_ind = torch.where(repeated)\n # if repeated.sum()>0:\n # print('repeated edges')\n # import pdb; pdb.set_trace()\n # cur_shift = to_shift[sampled_ind_mask][1188] - cumsum_offset[1188]\n\n return new_edge_index" }, { "identifier": "sampled_condensed_indices_uniformly", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample, return_mask=False\n):\n \"\"\"Max_condensed value: (bs) long tensor\n num_edges_to_sample: (bs) long tensor\n Return: condensed_index e.g. 
[0 1 3 0 2]\n \"\"\"\n assert (0 <= num_edges_to_sample).all(), (\n num_edges_to_sample <= max_condensed_value\n ).all()\n batch_size = max_condensed_value.shape[0]\n device = max_condensed_value.device\n\n if (\n len(torch.unique(max_condensed_value)) == 1\n and len(torch.unique(num_edges_to_sample)) == 1\n ):\n max_val = max_condensed_value[0]\n to_sample = num_edges_to_sample[0]\n sampled_condensed = torch.multinomial(\n torch.ones(max_val, device=device), num_samples=to_sample, replacement=False\n )\n edge_batch = torch.zeros(\n num_edges_to_sample[0], device=device, dtype=torch.long\n )\n if batch_size == 1:\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n return sampled_condensed, edge_batch, condensed_mask\n\n return sampled_condensed, edge_batch\n\n # Case of several graphs of the same size\n # Repeat the edge_index for each graph and aggregate them\n sampled_condensed_repeated = (\n sampled_condensed.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n edge_batch = torch.arange(batch_size, device=device).repeat_interleave(\n to_sample\n )\n\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n condensed_mask = (\n condensed_mask.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n return sampled_condensed_repeated, edge_batch, condensed_mask\n\n return sampled_condensed_repeated, edge_batch\n\n # Most general case: graphs of varying sizes\n max_size = torch.max(max_condensed_value)\n # import pdb; pdb.set_trace()\n if max_size > 10**7:\n print(\"[Warning]: sampling random edges might bew slow\")\n\n randperm_full = torch.randperm(max_size, device=device) # (max_condensed)\n randperm_expanded = randperm_full.unsqueeze(0).expand(\n batch_size, -1\n ) # (bs, max_condensed)\n\n # General goal: keep the indices on the left that are not too big for each graph\n # Mask1 is used to mask the indices that are too large for current graph\n mask1 = randperm_expanded < max_condensed_value.unsqueeze(1) # (bs, max_condensed)\n\n # Cumsum(mask1) is the number of valid indices on the left of each index\n # Mask2 will select the right number of indices on the left\n mask2 = torch.cumsum(mask1, dim=1) <= num_edges_to_sample.unsqueeze(\n 1\n ) # (bs, max_condensed)\n complete_mask = mask1 * mask2\n condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n edge_batch = (\n torch.arange(batch_size, device=device)\n .unsqueeze(1)\n .expand(-1, max_size)[complete_mask]\n )\n\n if return_mask:\n complete_mask = complete_mask.cumsum(-1)[complete_mask] - 1\n return condensed_index, edge_batch, complete_mask\n\n return condensed_index, edge_batch" }, { "identifier": "SignNetNodeEncoder", "path": "sparse_diffusion/models/sign_pos_encoder.py", "snippet": "class SignNetNodeEncoder(torch.nn.Module):\n \"\"\"SignNet Positional Embedding node encoder.\n https://arxiv.org/abs/2202.13013\n https://github.com/cptq/SignNet-BasisNet\n Uses precomputated Laplacian eigen-decomposition, but instead\n of eigen-vector sign flipping + DeepSet/Transformer, computes the PE as:\n SignNetPE(v_1, ... 
, v_k) = \\rho ( [\\phi(v_i) + \\rhi(−v_i)]^k_i=1 )\n where \\phi is GIN network applied to k first non-trivial eigenvectors, and\n \\rho is an MLP if k is a constant, but if all eigenvectors are used then\n \\rho is DeepSet with sum-pooling.\n SignNetPE of size dim_pe will get appended to each node feature vector.\n If `expand_x` set True, original node features will be first linearly\n projected to (dim_emb - dim_pe) size and the concatenated with SignNetPE.\n Args:\n dim_emb: Size of final node embedding\n expand_x: Expand node features `x` from dim_in to (dim_emb - dim_pe)\n \"\"\"\n\n def __init__(self, dataset_infos, sn_hidden_dim, k_node, expand_x=True):\n \"\"\"\n Initialize the model with the default parameters.\n \"\"\"\n super().__init__()\n self.dataset_infos = dataset_infos\n self.k_node = k_node\n dim_in = (\n dataset_infos.input_dims.X + dataset_infos.input_dims.charge - self.k_node\n ) # Expected original input node features dim\n dim_emb = sn_hidden_dim\n\n dim_pe = 16 # Size of PE embedding\n model_type = \"DeepSet\" # Encoder NN model type for SignNet\n\n if model_type not in [\"MLP\", \"DeepSet\"]:\n raise ValueError(f\"Unexpected SignNet model {model_type}\")\n self.model_type = model_type\n sign_inv_layers = 3 # Num. layers in \\phi GNN part\n rho_layers = 1 # Num. layers in \\rho MLP/DeepSet\n\n if rho_layers < 1:\n raise ValueError(f\"Num layers in rho model has to be positive.\")\n\n max_freqs = 10 # Num. eigenvectors (frequencies)\n self.pass_as_var = False # Pass PE also as a separate variable\n\n if dim_emb - dim_pe < 1:\n raise ValueError(\n f\"SignNet PE size {dim_pe} is too large for \"\n f\"desired embedding size of {dim_emb}.\"\n )\n\n if expand_x:\n self.linear_x = nn.Linear(dim_in, dim_emb - dim_pe)\n self.expand_x = expand_x\n\n # Sign invariant neural network.\n if self.model_type == \"MLP\":\n self.sign_inv_net = GINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n k=max_freqs,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n elif self.model_type == \"DeepSet\":\n self.sign_inv_net = MaskedGINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n else:\n raise ValueError(f\"Unexpected model {self.model_type}\")\n\n def forward(self, x, edge_index, batch):\n eigvecs = x[:, -self.k_node:]\n x = x[:, : -self.k_node]\n\n pos_enc = eigvecs.unsqueeze(-1) # (Num nodes) x (Num Eigenvectors) x 1\n\n empty_mask = torch.isnan(pos_enc)\n pos_enc[empty_mask] = 0 # (Num nodes) x (Num Eigenvectors) x 1\n\n # SignNet\n pos_enc = self.sign_inv_net(\n pos_enc, edge_index, batch\n ) # (Num nodes) x (pos_enc_dim)\n\n # Expand node features if needed\n if self.expand_x:\n h = self.linear_x(x)\n else:\n h = x\n\n # Concatenate final PEs to input embedding\n x = torch.cat((h, pos_enc), 1)\n # Keep PE also separate in a variable (e.g. for skip connections to input)\n\n return x" } ]
import time import os import math import pickle import json import torch import wandb import numpy as np import torch.nn as nn import torch.nn.functional as F import pytorch_lightning as pl from tqdm import tqdm from models.conv_transformer_model import GraphTransformerConv from diffusion.noise_schedule import ( PredefinedNoiseScheduleDiscrete, MarginalUniformTransition, ) from metrics.train_metrics import TrainLossDiscrete from metrics.abstract_metrics import SumExceptBatchMetric, SumExceptBatchKL, NLL from analysis.visualization import Visualizer from sparse_diffusion import utils from sparse_diffusion.diffusion import diffusion_utils from sparse_diffusion.diffusion.sample_edges_utils import ( get_computational_graph, mask_query_graph_from_comp_graph, sample_non_existing_edge_attr, condensed_to_matrix_index_batch, ) from sparse_diffusion.diffusion.sample_edges import ( sample_query_edges, sample_non_existing_edges_batched, sampled_condensed_indices_uniformly, ) from sparse_diffusion.models.sign_pos_encoder import SignNetNodeEncoder
10971
def sample_node_edge( self, pred, p_s_and_t_given_0_X, p_s_and_t_given_0_E, node_mask ): _, prob_X = self.sample_node(pred.X, p_s_and_t_given_0_X, node_mask) _, prob_E = self.sample_edge(pred.E, p_s_and_t_given_0_E, node_mask) sampled_s = diffusion_utils.sample_discrete_features( prob_X, prob_E, node_mask=node_mask ) return sampled_s def sample_sparse_node(self, pred_node, p_s_and_t_given_0_X): # Normalize predictions pred_X = F.softmax(pred_node, dim=-1) # N, dx # Dim of the second tensor: N, dx, dx weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # N, dx, dx unnormalized_prob_X = weighted_X.sum(dim=1) # N, dx unnormalized_prob_X[ torch.sum(unnormalized_prob_X, dim=-1) == 0 ] = 1e-5 # TODO: delete/masking? prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # N, dx assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = prob_X.multinomial(1)[:, 0] return X_t def sample_sparse_edge(self, pred_edge, p_s_and_t_given_0_E): # Normalize predictions pred_E = F.softmax(pred_edge, dim=-1) # N, d0 # Dim of the second tensor: N, d0, dt-1 weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # N, d0, dt-1 unnormalized_prob_E = weighted_E.sum(dim=1) # N, dt-1 unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = prob_E.multinomial(1)[:, 0] return E_t def sample_sparse_node_edge( self, pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask,
class DiscreteDenoisingDiffusion(pl.LightningModule): model_dtype = torch.float32 best_val_nll = 1e8 val_counter = 0 start_epoch_time = None val_iterations = None def __init__( self, cfg, dataset_infos, train_metrics, extra_features, domain_features, val_sampling_metrics, test_sampling_metrics, ): super().__init__() self.in_dims = dataset_infos.input_dims self.out_dims = dataset_infos.output_dims self.use_charge = cfg.model.use_charge and self.out_dims.charge > 1 self.node_dist = dataset_infos.nodes_dist self.extra_features = extra_features self.domain_features = domain_features self.sign_net = cfg.model.sign_net if not self.sign_net: cfg.model.sn_hidden_dim = 0 # sparse settings self.edge_fraction = cfg.model.edge_fraction self.autoregressive = cfg.model.autoregressive self.cfg = cfg self.test_variance = cfg.general.test_variance self.dataset_info = dataset_infos self.visualization_tools = Visualizer(dataset_infos) self.name = cfg.general.name self.T = cfg.model.diffusion_steps self.train_loss = TrainLossDiscrete(cfg.model.lambda_train, self.edge_fraction) self.train_metrics = train_metrics self.val_sampling_metrics = val_sampling_metrics self.test_sampling_metrics = test_sampling_metrics # TODO: transform to torchmetrics.MetricCollection self.val_nll = NLL() # self.val_metrics = torchmetrics.MetricCollection([]) self.val_X_kl = SumExceptBatchKL() self.val_E_kl = SumExceptBatchKL() self.val_X_logp = SumExceptBatchMetric() self.val_E_logp = SumExceptBatchMetric() self.best_nll = 1e8 self.best_epoch = 0 # TODO: transform to torchmetrics.MetricCollection self.test_nll = NLL() self.test_X_kl = SumExceptBatchKL() self.test_E_kl = SumExceptBatchKL() self.test_X_logp = SumExceptBatchMetric() self.test_E_logp = SumExceptBatchMetric() if self.use_charge: self.val_charge_kl = SumExceptBatchKL() self.val_charge_logp = SumExceptBatchMetric() self.test_charge_kl = SumExceptBatchKL() self.test_charge_logp = SumExceptBatchMetric() self.model = GraphTransformerConv( n_layers=cfg.model.n_layers, input_dims=self.in_dims, hidden_dims=cfg.model.hidden_dims, output_dims=self.out_dims, sn_hidden_dim=cfg.model.sn_hidden_dim, output_y=cfg.model.output_y, dropout=cfg.model.dropout ) # whether to use sign net if self.sign_net and cfg.model.extra_features == "all": self.sign_net = SignNetNodeEncoder( dataset_infos, cfg.model.sn_hidden_dim, cfg.model.num_eigenvectors ) # whether to use scale layers self.scaling_layer = cfg.model.scaling_layer ( self.node_scaling_layer, self.edge_scaling_layer, self.graph_scaling_layer, ) = self.get_scaling_layers() self.noise_schedule = PredefinedNoiseScheduleDiscrete( cfg.model.diffusion_noise_schedule, timesteps=cfg.model.diffusion_steps ) # Marginal transition node_types = self.dataset_info.node_types.float() x_marginals = node_types / torch.sum(node_types) edge_types = self.dataset_info.edge_types.float() e_marginals = edge_types / torch.sum(edge_types) if not self.use_charge: charge_marginals = node_types.new_zeros(0) else: charge_marginals = ( self.dataset_info.charge_types * node_types[:, None] ).sum(dim=0) print( f"Marginal distribution of the classes: {x_marginals} for nodes, {e_marginals} for edges" ) self.transition_model = MarginalUniformTransition( x_marginals=x_marginals, e_marginals=e_marginals, y_classes=self.out_dims.y, charge_marginals=charge_marginals, ) self.limit_dist = utils.PlaceHolder( X=x_marginals, E=e_marginals, y=torch.ones(self.out_dims.y) / self.out_dims.y, charge=charge_marginals, ) self.save_hyperparameters(ignore=["train_metrics", 
"sampling_metrics"]) self.log_every_steps = cfg.general.log_every_steps self.number_chain_steps = cfg.general.number_chain_steps def training_step(self, data, i): # The above code is using the Python debugger module `pdb` to set a breakpoint at a specific # line of code. When the code is executed, it will pause at that line and allow you to # interactively debug the program. if data.edge_index.numel() == 0: print("Found a batch with no edges. Skipping.") return # Map discrete classes to one hot encoding data = self.dataset_info.to_one_hot(data) start_time = time.time() sparse_noisy_data = self.apply_sparse_noise(data) if hasattr(self, "apply_noise_time"): self.apply_noise_time.append(round(time.time() - start_time, 2)) # Sample the query edges and build the computational graph = union(noisy graph, query edges) start_time = time.time() # print(data.ptr.diff()) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) query_mask, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"], ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr self.sample_query_time.append(round(time.time() - start_time, 2)) sparse_pred = self.forward(sparse_noisy_data) # Compute the loss on the query edges only sparse_pred.edge_attr = sparse_pred.edge_attr[query_mask] sparse_pred.edge_index = comp_edge_index[:, query_mask] # mask true label for query edges # We have the true edge index at time 0, and the query edge index at time t. This function # merge the query edges and edge index at time 0, delete repeated one, and retune the mask # for the true attr of query edges start_time = time.time() ( query_mask2, true_comp_edge_attr, true_comp_edge_index, ) = mask_query_graph_from_comp_graph( triu_query_edge_index=triu_query_edge_index, edge_index=data.edge_index, edge_attr=data.edge_attr, num_classes=self.out_dims.E, ) query_true_edge_attr = true_comp_edge_attr[query_mask2] assert ( true_comp_edge_index[:, query_mask2] - sparse_pred.edge_index == 0 ).all() self.query_count.append(len(query_true_edge_attr)) true_data = utils.SparsePlaceHolder( node=data.x, charge=data.charge, edge_attr=query_true_edge_attr, edge_index=sparse_pred.edge_index, y=data.y, batch=data.batch, ) true_data.collapse() # Map one-hot to discrete class self.coalesce_time.append(round(time.time() - start_time, 2)) # Loss calculation start_time = time.time() loss = self.train_loss.forward( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.train_metrics( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.loss_time.append(round(time.time() - start_time, 2)) return {"loss": loss} def on_fit_start(self) -> None: print( f"Size of the input features:" f" X {self.in_dims.X}, E {self.in_dims.E}, charge {self.in_dims.charge}, y {self.in_dims.y}" ) if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once def on_train_epoch_start(self) -> None: self.print("Starting train epoch...") self.start_epoch_time = time.time() self.train_loss.reset() self.train_metrics.reset() self.query_count = [] self.apply_noise_time = [] self.extra_data_time = [] self.forward_time = [] self.sample_query_time = [] self.coalesce_time 
= [] self.loss_time = [] self.cycle_time = [] self.eigen_time = [] def on_train_epoch_end(self) -> None: epoch_loss = self.train_loss.log_epoch_metrics() self.print( f"Epoch {self.current_epoch} finished: X: {epoch_loss['train_epoch/x_CE'] :.2f} -- " f"E: {epoch_loss['train_epoch/E_CE'] :.2f} --" f"charge: {epoch_loss['train_epoch/charge_CE'] :.2f} --" f"y: {epoch_loss['train_epoch/y_CE'] :.2f}" ) self.train_metrics.log_epoch_metrics() if wandb.run: wandb.log({"epoch": self.current_epoch}, commit=False) def on_validation_epoch_start(self) -> None: val_metrics = [self.val_nll, self.val_X_kl, self.val_E_kl, self.val_X_logp, self.val_E_logp, self.val_sampling_metrics] if self.use_charge: val_metrics.extend([self.val_charge_kl, self.val_charge_logp]) for metric in val_metrics: metric.reset() def validation_step(self, data, i): data = self.dataset_info.to_one_hot(data) sparse_noisy_data = self.apply_sparse_noise(data) # Sample the query edges and build the computational graph = union(noisy graph, query edges) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) _, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"] ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr sparse_pred = self.forward(sparse_noisy_data) # to dense dense_pred, node_mask = utils.to_dense( x=sparse_pred.node, edge_index=sparse_pred.edge_index, edge_attr=sparse_pred.edge_attr, batch=sparse_pred.batch, charge=sparse_pred.charge, ) dense_original, _ = utils.to_dense( x=data.x, edge_index=data.edge_index, edge_attr=data.edge_attr, batch=data.batch, charge=data.charge, ) noisy_data = utils.densify_noisy_data(sparse_noisy_data) nll = self.compute_val_loss( dense_pred, noisy_data, dense_original.X, dense_original.E, dense_original.y, node_mask, charge=dense_original.charge, test=False, ) return {"loss": nll} def on_validation_epoch_end(self) -> None: metrics = [ self.val_nll.compute(), self.val_X_kl.compute() * self.T, self.val_E_kl.compute() * self.T, self.val_X_logp.compute(), self.val_E_logp.compute(), ] if self.use_charge: metrics += [ self.val_charge_kl.compute() * self.T, self.val_charge_logp.compute(), ] else: metrics += [-1, -1] if self.val_nll.compute() < self.best_nll: self.best_epoch = self.current_epoch self.best_nll = self.val_nll.compute() metrics += [self.best_epoch, self.best_nll] if wandb.run: wandb.log( { "val/epoch_NLL": metrics[0], "val/X_kl": metrics[1], "val/E_kl": metrics[2], "val/X_logp": metrics[3], "val/E_logp": metrics[4], "val/charge_kl": metrics[5], "val/charge_logp": metrics[6], "val/best_nll_epoch": metrics[7], "val/best_nll": metrics[8], }, commit=False, ) self.print( f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ", f"Val Edge type KL: {metrics[2] :.2f}", ) # Log val nll with default Lightning logger, so it can be monitored by checkpoint callback val_nll = metrics[0] self.log("val/epoch_NLL", val_nll, sync_dist=True) if val_nll < self.best_val_nll: self.best_val_nll = val_nll self.print( "Val loss: %.4f \t Best val loss: %.4f\n" % (val_nll, self.best_val_nll) ) self.val_counter += 1 print("Starting to sample") if self.val_counter % self.cfg.general.sample_every_val == 0: start = time.time() 
samples_left_to_generate = self.cfg.general.samples_to_generate samples_left_to_save = self.cfg.general.samples_to_save chains_left_to_save = self.cfg.general.chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") print('multi-gpu metrics for uniqueness is not accurate in the validation step.') generated_graphs = [] ident = 0 while samples_left_to_generate > 0: bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=ident, batch_size=to_generate, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) generated_graphs.append(sampled_batch) ident += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save generated_graphs = utils.concat_sparse_graphs(generated_graphs) print( f"Sampled {generated_graphs.batch.max().item()+1} batches on local rank {self.local_rank}. ", "Sampling took {time.time() - start:.2f} seconds\n" ) print("Computing sampling metrics...") self.val_sampling_metrics.compute_all_metrics( generated_graphs, self.current_epoch, local_rank=self.local_rank ) def on_test_epoch_start(self) -> None: print("Starting test...") if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once test_metrics = [self.test_nll, self.test_X_kl, self.test_E_kl, self.test_X_logp, self.test_E_logp, self.test_sampling_metrics] if self.use_charge: test_metrics.extend([self.test_charge_kl, self.test_charge_logp]) for metric in test_metrics: metric.reset() def test_step(self, data, i): pass def on_test_epoch_end(self) -> None: """Measure likelihood on a test set and compute stability metrics.""" if self.cfg.general.generated_path: self.print("Loading generated samples...") samples = np.load(self.cfg.general.generated_path) with open(self.cfg.general.generated_path, "rb") as f: samples = pickle.load(f) else: samples_left_to_generate = self.cfg.general.final_model_samples_to_generate samples_left_to_save = self.cfg.general.final_model_samples_to_save chains_left_to_save = self.cfg.general.final_model_chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") samples = [] id = 0 while samples_left_to_generate > 0: print( f"Samples left to generate: {samples_left_to_generate}/" f"{self.cfg.general.final_model_samples_to_generate}", end="", flush=True, ) bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=id, batch_size=to_generate, num_nodes=None, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) samples.append(sampled_batch) id += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save print("Saving the generated graphs") samples = 
utils.concat_sparse_graphs(samples) filename = f"generated_samples1.txt" # Save the samples list as pickle to a file that depends on the local rank # This is needed to avoid overwriting the same file on different GPUs with open(f"generated_samples_rank{self.local_rank}.pkl", "wb") as f: pickle.dump(samples, f) # This line is used to sync between gpus self._trainer.strategy.barrier() for i in range(2, 10): if os.path.exists(filename): filename = f"generated_samples{i}.txt" else: break with open(filename, "w") as f: for i in range(samples.batch.max().item() + 1): atoms = samples.node[samples.batch == i] f.write(f"N={atoms.shape[0]}\n") atoms = atoms.tolist() f.write("X: \n") for at in atoms: f.write(f"{at} ") f.write("\n") f.write("E: \n") bonds = samples.edge_attr[samples.batch[samples.edge_index[0]] == i] for bond in bonds: f.write(f"{bond} ") f.write("\n") print("Saved.") print("Computing sampling metrics...") # Load the pickles of the other GPUs samples = [] for i in range(self._trainer.num_devices): with open(f"generated_samples_rank{i}.pkl", "rb") as f: samples.append(pickle.load(f)) samples = utils.concat_sparse_graphs(samples) print('saving all samples') with open(f"generated_samples.pkl", "wb") as f: pickle.dump(samples, f) if self.test_variance == 1: to_log, _ = self.test_sampling_metrics.compute_all_metrics( samples, self.current_epoch, self.local_rank ) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) else: to_log = {} for i in range(self.test_variance): start_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * i) end_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * (i + 1)) cur_samples = utils.split_samples(samples, start_idx, end_idx) cur_to_log, _ = self.test_sampling_metrics.compute_all_metrics(cur_samples, self.current_epoch, self.local_rank) if i == 0: to_log = {i: [cur_to_log[i]] for i in cur_to_log} else: to_log = {i: to_log[i].append(cur_to_log[i]) for i in cur_to_log} # get the variance and mean value of the metrics final_to_log = {i: [np.mean(i), np.var(i)] for i in to_log} to_log.update(final_to_log) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}_fold{self.test_variance}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) print("Test sampling metrics computed.") def apply_sparse_noise(self, data): """Sample noise and apply it to the data.""" bs = int(data.batch.max() + 1) t_int = torch.randint( 1, self.T + 1, size=(bs, 1), device=self.device ).float() # (bs, 1) s_int = t_int - 1 t_float = t_int / self.T s_float = s_int / self.T # beta_t and alpha_s_bar are used for denoising/loss computation beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) # (bs, 1) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # (bs, 1) Qtb = self.transition_model.get_Qt_bar( alpha_t_bar, device=self.device ) # (bs, dx_in, dx_out), (bs, de_in, de_out) assert (abs(Qtb.X.sum(dim=2) - 1.0) < 1e-4).all(), Qtb.X.sum(dim=2) - 1 assert (abs(Qtb.E.sum(dim=2) - 1.0) < 1e-4).all() # Compute transition 
probabilities # get charge distribution if self.use_charge: prob_charge = data.charge.unsqueeze(1) @ Qtb.charge[data.batch] charge_t = prob_charge.squeeze(1).multinomial(1).flatten() # (N, ) charge_t = F.one_hot(charge_t, num_classes=self.out_dims.charge) else: charge_t = data.charge # Diffuse sparse nodes and sample sparse node labels probN = data.x.unsqueeze(1) @ Qtb.X[data.batch] # (N, dx) node_t = probN.squeeze(1).multinomial(1).flatten() # (N, ) # count node numbers and edge numbers for existing edges for each graph num_nodes = data.ptr.diff().long() batch_edge = data.batch[data.edge_index[0]] num_edges = torch.zeros(num_nodes.shape).to(self.device) unique, counts = torch.unique(batch_edge, sorted=True, return_counts=True) num_edges[unique] = counts.float() # count number of non-existing edges for each graph num_neg_edge = ((num_nodes - 1) * num_nodes - num_edges) / 2 # (bs, ) # Step1: diffuse on existing edges # get edges defined in the top triangle of the adjacency matrix dir_edge_index, dir_edge_attr = utils.undirected_to_directed( data.edge_index, data.edge_attr ) batch_edge = data.batch[dir_edge_index[0]] batch_Qtb = Qtb.E[batch_edge] probE = dir_edge_attr.unsqueeze(1) @ batch_Qtb dir_edge_attr = probE.squeeze(1).multinomial(1).flatten() # Step2: diffuse on non-existing edges # get number of new edges according to Qtb emerge_prob = Qtb.E[:, 0, 1:].sum(-1) # (bs, ) num_emerge_edges = ( torch.distributions.binomial.Binomial(num_neg_edge, emerge_prob) .sample() .int() ) # combine existing and non-existing edges (both are directed, i.e. triu) if num_emerge_edges.max() > 0: # sample non-existing edges neg_edge_index = sample_non_existing_edges_batched( num_edges_to_sample=num_emerge_edges, existing_edge_index=dir_edge_index, num_nodes=num_nodes, batch=data.batch, ) neg_edge_attr = sample_non_existing_edge_attr( query_edges_dist_batch=Qtb.E[:, 0, 1:], num_edges_to_sample=num_emerge_edges, ) E_t_attr = torch.hstack([dir_edge_attr, neg_edge_attr]) E_t_index = torch.hstack([dir_edge_index, neg_edge_index]) else: E_t_attr = dir_edge_attr E_t_index = dir_edge_index # mask non-existing edges mask = E_t_attr != 0 E_t_attr = E_t_attr[mask] E_t_index = E_t_index[:, mask] E_t_index, E_t_attr = utils.to_undirected(E_t_index, E_t_attr) E_t_attr = F.one_hot(E_t_attr, num_classes=self.out_dims.E) node_t = F.one_hot(node_t, num_classes=self.out_dims.X) sparse_noisy_data = { "t_int": t_int, "t_float": t_float, "beta_t": beta_t, "alpha_s_bar": alpha_s_bar, "alpha_t_bar": alpha_t_bar, "node_t": node_t, "edge_index_t": E_t_index, "edge_attr_t": E_t_attr, "comp_edge_index_t": None, "comp_edge_attr_t": None, # computational graph "y_t": data.y, "batch": data.batch, "ptr": data.ptr, "charge_t": charge_t, } return sparse_noisy_data def compute_val_loss(self, pred, noisy_data, X, E, y, node_mask, charge, test): """Computes an estimator for the variational lower bound. pred: (batch_size, n, total_features) noisy_data: dict X, E, y : (bs, n, dx), (bs, n, n, de), (bs, dy) node_mask : (bs, n) Output: nll (size 1) """ t = noisy_data["t_float"] # 1. N = node_mask.sum(1).long() log_pN = self.node_dist.log_prob(N) # 2. The KL between q(z_T | x) and p(z_T) = Uniform(1/num_classes). Should be close to zero. kl_prior = self.kl_prior(X, E, node_mask, charge=charge) # 3. 
Diffusion loss loss_all_t = self.compute_Lt( X, E, y, charge, pred, noisy_data, node_mask, test=test ) # Combine terms nlls = - log_pN + kl_prior + loss_all_t assert (~nlls.isnan()).all(), f"NLLs contain NaNs: {nlls}" assert len(nlls.shape) == 1, f"{nlls.shape} has more than only batch dim." # Update NLL metric object and return batch nll nll = (self.test_nll if test else self.val_nll)(nlls) # Average over the batch if wandb.run: wandb.log( { "kl prior": kl_prior.mean(), "Estimator loss terms": loss_all_t.mean(), "log_pn": log_pN.mean(), "val_nll": nll, "epoch": self.current_epoch }, commit=False, ) return nll def kl_prior(self, X, E, node_mask, charge): """Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1). This is essentially a lot of work for something that is in practice negligible in the loss. However, you compute it so that you see it when you've made a mistake in your noise schedule. """ # Compute the last alpha value, alpha_T. ones = torch.ones((X.size(0), 1), device=X.device) Ts = self.T * ones alpha_t_bar = self.noise_schedule.get_alpha_bar(t_int=Ts) # (bs, 1) Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) # Compute transition probabilities probX = X @ Qtb.X # (bs, n, dx_out) probE = E @ Qtb.E.unsqueeze(1) # (bs, n, n, de_out) assert probX.shape == X.shape bs, n, _ = probX.shape limit_X = self.limit_dist.X[None, None, :].expand(bs, n, -1).type_as(probX) limit_E = ( self.limit_dist.E[None, None, None, :].expand(bs, n, n, -1).type_as(probE) ) if self.use_charge: prob_charge = charge @ Qtb.charge # (bs, n, de_out) limit_charge = ( self.limit_dist.charge[None, None, :] .expand(bs, n, -1) .type_as(prob_charge) ) limit_charge = limit_charge.clone() else: prob_charge = limit_charge = None # Make sure that masked rows do not contribute to the loss ( limit_dist_X, limit_dist_E, probX, probE, limit_dist_charge, prob_charge, ) = diffusion_utils.mask_distributions( true_X=limit_X.clone(), true_E=limit_E.clone(), pred_X=probX, pred_E=probE, node_mask=node_mask, true_charge=limit_charge, pred_charge=prob_charge, ) kl_distance_X = F.kl_div( input=probX.log(), target=limit_dist_X, reduction="none" ) kl_distance_E = F.kl_div( input=probE.log(), target=limit_dist_E, reduction="none" ) # not all edges are used for loss calculation E_mask = torch.logical_or( kl_distance_E.sum(-1).isnan(), kl_distance_E.sum(-1).isinf() ) kl_distance_E[E_mask] = 0 X_mask = torch.logical_or( kl_distance_X.sum(-1).isnan(), kl_distance_X.sum(-1).isinf() ) kl_distance_X[X_mask] = 0 loss = diffusion_utils.sum_except_batch( kl_distance_X ) + diffusion_utils.sum_except_batch(kl_distance_E) # The above code is using the Python debugger module `pdb` to set a breakpoint in the code. # When the code is executed, it will pause at this line and allow you to interactively debug # the program. 
if self.use_charge: kl_distance_charge = F.kl_div( input=prob_charge.log(), target=limit_dist_charge, reduction="none" ) kl_distance_charge[X_mask] = 0 loss = loss + diffusion_utils.sum_except_batch(kl_distance_charge) assert (~loss.isnan()).any() return loss def compute_Lt(self, X, E, y, charge, pred, noisy_data, node_mask, test): pred_probs_X = F.softmax(pred.X, dim=-1) pred_probs_E = F.softmax(pred.E, dim=-1) if self.use_charge: pred_probs_charge = F.softmax(pred.charge, dim=-1) else: pred_probs_charge = None charge = None Qtb = self.transition_model.get_Qt_bar(noisy_data["alpha_t_bar"], self.device) Qsb = self.transition_model.get_Qt_bar(noisy_data["alpha_s_bar"], self.device) Qt = self.transition_model.get_Qt(noisy_data["beta_t"], self.device) # Compute distributions to compare with KL bs, n, d = X.shape prob_true = diffusion_utils.posterior_distributions( X=X, E=E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_true.E = prob_true.E.reshape((bs, n, n, -1)) prob_pred = diffusion_utils.posterior_distributions( X=pred_probs_X, E=pred_probs_E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=pred_probs_charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_pred.E = prob_pred.E.reshape((bs, n, n, -1)) # Reshape and filter masked rows ( prob_true_X, prob_true_E, prob_pred.X, prob_pred.E, prob_true.charge, prob_pred.charge, ) = diffusion_utils.mask_distributions( true_X=prob_true.X, true_E=prob_true.E, pred_X=prob_pred.X, pred_E=prob_pred.E, node_mask=node_mask, true_charge=prob_true.charge, pred_charge=prob_pred.charge, ) kl_x = (self.test_X_kl if test else self.val_X_kl)(prob_true_X, torch.log(prob_pred.X)) kl_e = (self.test_E_kl if test else self.val_E_kl)(prob_true_E, torch.log(prob_pred.E)) assert (~(kl_x + kl_e).isnan()).any() loss = kl_x + kl_e if self.use_charge: kl_charge = (self.test_charge_kl if test else self.val_charge_kl)( prob_true.charge, torch.log(prob_pred.charge) ) assert (~(kl_charge).isnan()).any() loss = loss + kl_charge return self.T * loss def reconstruction_logp(self, t, X, E, node_mask, charge): # Compute noise values for t = 0. 
t_zeros = torch.zeros_like(t) beta_0 = self.noise_schedule(t_zeros) Q0 = self.transition_model.get_Qt(beta_t=beta_0, device=self.device) probX0 = X @ Q0.X # (bs, n, dx_out) probE0 = E @ Q0.E.unsqueeze(1) # (bs, n, n, de_out) prob_charge0 = None if self.use_charge: prob_charge0 = charge @ Q0.charge sampled0 = diffusion_utils.sample_discrete_features( probX=probX0, probE=probE0, node_mask=node_mask, prob_charge=prob_charge0 ) X0 = F.one_hot(sampled0.X, num_classes=self.out_dims.X).float() E0 = F.one_hot(sampled0.E, num_classes=self.out_dims.E).float() y0 = sampled0.y assert (X.shape == X0.shape) and (E.shape == E0.shape) charge0 = X0.new_zeros((*X0.shape[:-1], 0)) if self.use_charge: charge0 = F.one_hot( sampled0.charge, num_classes=self.out_dims.charge ).float() sampled_0 = utils.PlaceHolder(X=X0, E=E0, y=y0, charge=charge0).mask(node_mask) # Predictions noisy_data = { "X_t": sampled_0.X, "E_t": sampled_0.E, "y_t": sampled_0.y, "node_mask": node_mask, "t_int": torch.zeros((X0.shape[0], 1), dtype=torch.long).to(self.device), "t_float": torch.zeros((X0.shape[0], 1), dtype=torch.float).to(self.device), "charge_t": sampled_0.charge, } sparse_noisy_data = utils.to_sparse( noisy_data["X_t"], noisy_data["E_t"], noisy_data["y_t"], node_mask, charge=noisy_data["charge_t"], ) noisy_data.update(sparse_noisy_data) noisy_data["comp_edge_index_t"] = sparse_noisy_data["edge_index_t"] noisy_data["comp_edge_attr_t"] = sparse_noisy_data["edge_attr_t"] pred0 = self.forward(noisy_data) pred0, _ = utils.to_dense( pred0.node, pred0.edge_index, pred0.edge_attr, pred0.batch, pred0.charge ) # Normalize predictions probX0 = F.softmax(pred0.X, dim=-1) probE0 = F.softmax(pred0.E, dim=-1) # Set masked rows to arbitrary values that don't contribute to loss probX0[~node_mask] = torch.ones(self.out_dims.X).type_as(probX0) probE0[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))] = torch.ones( self.out_dims.E ).type_as(probE0) diag_mask = torch.eye(probE0.size(1)).type_as(probE0).bool() diag_mask = diag_mask.unsqueeze(0).expand(probE0.size(0), -1, -1) probE0[diag_mask] = torch.ones(self.out_dims.E).type_as(probE0) assert (~probX0.isnan()).any() assert (~probE0.isnan()).any() prob_charge0 = charge if self.use_charge: prob_charge0 = F.softmax(pred0.charge, dim=-1) prob_charge0[~node_mask] = torch.ones(self.out_dims.charge).type_as( prob_charge0 ) assert (~prob_charge0.isnan()).any() return utils.PlaceHolder(X=probX0, E=probE0, y=None, charge=prob_charge0) def forward_sparse(self, sparse_noisy_data): start_time = time.time() node = sparse_noisy_data["node_t"] edge_attr = sparse_noisy_data["edge_attr_t"].float() edge_index = sparse_noisy_data["edge_index_t"].to(torch.int64) y = sparse_noisy_data["y_t"] batch = sparse_noisy_data["batch"].long() if hasattr(self, "forward_time"): self.forward_time.append(round(time.time() - start_time, 2)) return self.model(node, edge_attr, edge_index, y, batch) def forward(self, noisy_data): """ noisy data contains: node_t, comp_edge_index_t, comp_edge_attr_t, batch """ # build the sparse_noisy_data for the forward function of the sparse model start_time = time.time() sparse_noisy_data = self.compute_extra_data(sparse_noisy_data=noisy_data) if self.sign_net and self.cfg.model.extra_features == "all": x = self.sign_net( sparse_noisy_data["node_t"], sparse_noisy_data["edge_index_t"], sparse_noisy_data["batch"], ) sparse_noisy_data["node_t"] = torch.hstack( [sparse_noisy_data["node_t"], x] ) if hasattr(self, "extra_data_time"): self.extra_data_time.append(round(time.time() - start_time, 2)) return 
self.forward_sparse(sparse_noisy_data) @torch.no_grad() def sample_batch( self, batch_id: int, batch_size: int, keep_chain: int, number_chain_steps: int, save_final: int, num_nodes=None, ): """ :param batch_id: int :param batch_size: int :param num_nodes: int, <int>tensor (batch_size) (optional) for specifying number of nodes :param save_final: int: number of predictions to save to file :param keep_chain: int: number of chains to save to file :param keep_chain_steps: number of timesteps to save for each chain :return: molecule_list. Each element of this list is a tuple (node_types, charge, positions) """ if num_nodes is None: num_nodes = self.node_dist.sample_n(batch_size, self.device) elif type(num_nodes) == int: num_nodes = num_nodes * torch.ones( batch_size, device=self.device, dtype=torch.int ) else: assert isinstance(num_nodes, torch.Tensor) num_nodes = num_nodes num_max = torch.max(num_nodes) # Build the masks arange = ( torch.arange(num_max, device=self.device) .unsqueeze(0) .expand(batch_size, -1) ) node_mask = arange < num_nodes.unsqueeze(1) # Sample noise -- z has size ( num_samples, num_nodes, num_features) sparse_sampled_data = diffusion_utils.sample_sparse_discrete_feature_noise( limit_dist=self.limit_dist, node_mask=node_mask ) assert number_chain_steps < self.T chain = utils.SparseChainPlaceHolder(keep_chain=keep_chain) # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1. for s_int in tqdm(reversed(range(self.T)), total=self.T): s_array = (s_int * torch.ones((batch_size, 1))).to(self.device) t_array = s_array + 1 s_norm = s_array / self.T t_norm = t_array / self.T # Sample z_s sparse_sampled_data = self.sample_p_zs_given_zt( s_norm, t_norm, sparse_sampled_data ) # keep_chain can be very small, e.g., 1 if ((s_int * number_chain_steps) % self.T == 0) and (keep_chain != 0): chain.append(sparse_sampled_data) # get generated graphs generated_graphs = sparse_sampled_data.to_device("cpu") generated_graphs.edge_attr = sparse_sampled_data.edge_attr.argmax(-1) generated_graphs.node = sparse_sampled_data.node.argmax(-1) if self.use_charge: generated_graphs.charge = sparse_sampled_data.charge.argmax(-1) - 1 if self.visualization_tools is not None: current_path = os.getcwd() # Visualize chains if keep_chain > 0: print("Visualizing chains...") chain_path = os.path.join( current_path, f"chains/{self.cfg.general.name}/" f"epoch{self.current_epoch}/", ) try: _ = self.visualization_tools.visualize_chain( chain_path, batch_id, chain, local_rank=self.local_rank ) except OSError: print("Warn: image chains failed to be visualized ") # Visualize the final molecules print("\nVisualizing molecules...") result_path = os.path.join( current_path, f"graphs/{self.name}/epoch{self.current_epoch}_b{batch_id}/", ) try: self.visualization_tools.visualize( result_path, generated_graphs, save_final, local_rank=self.local_rank, ) except OSError: print("Warn: image failed to be visualized ") print("Done.") return generated_graphs def sample_node(self, pred_X, p_s_and_t_given_0_X, node_mask): # Normalize predictions pred_X = F.softmax(pred_X, dim=-1) # bs, n, d0 # Dim of these two tensors: bs, N, d0, d_t-1 weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # bs, n, d0, d_t-1 unnormalized_prob_X = weighted_X.sum(dim=2) # bs, n, d_t-1 unnormalized_prob_X[torch.sum(unnormalized_prob_X, dim=-1) == 0] = 1e-5 prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # bs, n, d_t assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = 
diffusion_utils.sample_discrete_node_features(prob_X, node_mask) return X_t, prob_X def sample_edge(self, pred_E, p_s_and_t_given_0_E, node_mask): # Normalize predictions bs, n, n, de = pred_E.shape pred_E = F.softmax(pred_E, dim=-1) # bs, n, n, d0 pred_E = pred_E.reshape((bs, -1, pred_E.shape[-1])) weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # bs, N, d0, d_t-1 unnormalized_prob_E = weighted_E.sum(dim=-2) unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) prob_E = prob_E.reshape(bs, n, n, de) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = diffusion_utils.sample_discrete_edge_features(prob_E, node_mask) return E_t, prob_E def sample_node_edge( self, pred, p_s_and_t_given_0_X, p_s_and_t_given_0_E, node_mask ): _, prob_X = self.sample_node(pred.X, p_s_and_t_given_0_X, node_mask) _, prob_E = self.sample_edge(pred.E, p_s_and_t_given_0_E, node_mask) sampled_s = diffusion_utils.sample_discrete_features( prob_X, prob_E, node_mask=node_mask ) return sampled_s def sample_sparse_node(self, pred_node, p_s_and_t_given_0_X): # Normalize predictions pred_X = F.softmax(pred_node, dim=-1) # N, dx # Dim of the second tensor: N, dx, dx weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # N, dx, dx unnormalized_prob_X = weighted_X.sum(dim=1) # N, dx unnormalized_prob_X[ torch.sum(unnormalized_prob_X, dim=-1) == 0 ] = 1e-5 # TODO: delete/masking? prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # N, dx assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = prob_X.multinomial(1)[:, 0] return X_t def sample_sparse_edge(self, pred_edge, p_s_and_t_given_0_E): # Normalize predictions pred_E = F.softmax(pred_edge, dim=-1) # N, d0 # Dim of the second tensor: N, d0, dt-1 weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # N, d0, dt-1 unnormalized_prob_E = weighted_E.sum(dim=1) # N, dt-1 unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = prob_E.multinomial(1)[:, 0] return E_t def sample_sparse_node_edge( self, pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. 
if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask,
) = sampled_condensed_indices_uniformly(
8
2023-10-30 12:12:16+00:00
16k
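A note on the sampling code embedded in the record above: sample_sparse_node and sample_sparse_edge both follow the same pattern of softmax-normalizing the model's predictions, weighting them by the precomputed posterior term p(z_s, z_t | z_0), renormalizing, and drawing discrete labels with multinomial. Below is a minimal, self-contained sketch of that step; the function name, tensor shapes, and random inputs are illustrative assumptions, not the repository's API.

import torch
import torch.nn.functional as F

def sample_discrete_labels(pred_node: torch.Tensor, p_s_and_t_given_0: torch.Tensor) -> torch.Tensor:
    # pred_node: (N, d) raw logits; p_s_and_t_given_0: (N, d, d) posterior term per node (assumed shapes).
    probs = F.softmax(pred_node, dim=-1)                 # (N, d) normalized predictions
    weighted = probs.unsqueeze(-1) * p_s_and_t_given_0   # (N, d, d) weight posterior by prediction
    unnormalized = weighted.sum(dim=1)                   # (N, d) marginalize over the predicted class
    unnormalized[unnormalized.sum(dim=-1) == 0] = 1e-5   # guard against all-zero rows before normalizing
    prob = unnormalized / unnormalized.sum(dim=-1, keepdim=True)
    return prob.multinomial(1).squeeze(-1)               # (N,) sampled class indices

# Illustrative usage with random tensors (shapes assumed):
labels = sample_discrete_labels(torch.randn(6, 4), torch.rand(6, 4, 4))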
akekic/causal-component-analysis
experiments/cauca/main.py
[ { "identifier": "DGP", "path": "config.py", "snippet": "DGP = {\n \"graph-4-0\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-2\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-3\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-4\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 12_500,\n \"observation_dim\": 128, # D\n },\n \"graph-4-5\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-6\": {\n \"num_causal_variables\": 10, # N\n \"adj_matrix\": np.array(\n [\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 
0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 1, 0],\n ]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-7\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 100_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-8\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-10\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p000\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p025\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.25,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p050\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p075\": {\n 
\"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.75,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p100\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 1.0,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-7-random-1\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 7, # D\n },\n \"graph-7-random-1-local\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 7, # D\n },\n \"graph-2-1\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 1], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-2-2\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 0], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-3-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-3-random-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-5-random-1\": {\n \"num_causal_variables\": 5, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 5, # D\n },\n}" }, { "identifier": "MultiEnvDataModule", "path": "data_generator/data_module.py", "snippet": "class MultiEnvDataModule(LightningDataModule):\n \"\"\"\n Data module for multi-environment data.\n\n Attributes\n ----------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n num_samples_per_env: int\n Number of samples per environment.\n batch_size: int\n Batch size.\n num_workers: int\n Number of workers for the data loaders.\n 
intervention_targets_per_env: Tensor, shape (num_envs, num_causal_variables)\n Intervention targets per environment, with 1 indicating that the variable is intervened on.\n log_dir: Optional[Path]\n Directory to save summary statistics and plots to. Default: None.\n intervention_target_misspec: bool\n Whether to misspecify the intervention targets. If true, the intervention targets are permuted.\n I.e. the model received the wrong intervention targets. Default: False.\n intervention_target_perm: Optional[list[int]]\n Permutation of the intervention targets. If None, a random permutation is used. Only used if\n intervention_target_misspec is True. Default: None.\n\n Methods\n -------\n setup(stage=None) -> None\n Setup the data module. This is where the data is sampled.\n train_dataloader() -> DataLoader\n Return the training data loader.\n val_dataloader() -> DataLoader\n Return the validation data loader.\n test_dataloader() -> DataLoader\n Return the test data loader.\n \"\"\"\n\n def __init__(\n self,\n multi_env_dgp: MultiEnvDGP,\n num_samples_per_env: int,\n batch_size: int,\n num_workers: int,\n intervention_targets_per_env: Tensor,\n log_dir: Optional[Path] = None,\n intervention_target_misspec: bool = False,\n intervention_target_perm: Optional[list[int]] = None,\n ) -> None:\n super().__init__()\n self.medgp = multi_env_dgp\n self.num_samples_per_env = num_samples_per_env\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.intervention_targets_per_env = intervention_targets_per_env\n self.log_dir = log_dir\n\n self.intervention_target_misspec = intervention_target_misspec\n latent_dim = self.medgp.latent_scm.latent_dim\n assert (\n intervention_target_perm is None\n or len(intervention_target_perm) == latent_dim\n )\n self.intervention_target_perm = intervention_target_perm\n\n def setup(self, stage: Optional[str] = None) -> None:\n latent_dim = self.medgp.latent_scm.latent_dim\n num_envs = self.intervention_targets_per_env.shape[0]\n\n x, v, u, e, intervention_targets, log_prob = self.medgp.sample(\n self.num_samples_per_env,\n intervention_targets_per_env=self.intervention_targets_per_env,\n )\n if self.intervention_target_misspec:\n assert (\n num_envs == latent_dim + 1\n ), \"only works if num_envs == num_causal_variables + 1\"\n if self.intervention_target_perm is None:\n perm = random_perm(latent_dim)\n self.intervention_target_perm = perm\n else:\n perm = self.intervention_target_perm\n\n # remember where old targets were\n idx_mask_list = []\n for i in range(latent_dim):\n idx_mask = intervention_targets[:, i] == 1\n idx_mask_list.append(idx_mask)\n intervention_targets[idx_mask, i] = 0\n\n # permute targets\n for i in range(latent_dim):\n intervention_targets[idx_mask_list[i], perm[i]] = 1\n\n dataset = TensorDataset(x, v, u, e, intervention_targets, log_prob)\n train_size = int(0.8 * len(dataset))\n val_size = int(0.5 * (len(dataset) - train_size))\n test_size = len(dataset) - train_size - val_size\n (\n self.train_dataset,\n self.val_dataset,\n self.test_dataset,\n ) = torch.utils.data.random_split(dataset, [train_size, val_size, test_size])\n\n if self.log_dir is not None:\n self.log_dir.mkdir(parents=True, exist_ok=True)\n summary_stats = summary_statistics(x, v, e, intervention_targets)\n for key, value in summary_stats.items():\n value.to_csv(self.log_dir / f\"{key}_summary_stats.csv\")\n plot_dag(self.medgp.adjacency_matrix, self.log_dir)\n try:\n with open(self.log_dir / \"base_coeff_values.txt\", \"w\") as f:\n 
f.write(str(self.medgp.latent_scm.base_coeff_values))\n except AttributeError:\n pass\n # save mixing function coefficients\n self.medgp.mixing_function.save_coeffs(self.log_dir)\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n\n def val_dataloader(self) -> DataLoader:\n val_loader = DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n return val_loader\n\n def test_dataloader(self) -> DataLoader:\n test_loader = DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n return test_loader" }, { "identifier": "make_multi_env_dgp", "path": "data_generator/multi_env_gdp.py", "snippet": "def make_multi_env_dgp(\n latent_dim: int,\n observation_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n shift_noise: bool = True,\n noise_shift_type: str = \"mean\",\n mixing: str = \"nonlinear\",\n scm: str = \"linear\",\n n_nonlinearities: int = 1,\n scm_coeffs_low: float = -1,\n scm_coeffs_high: float = 1,\n coeffs_min_abs_value: float = None,\n edge_prob: float = None,\n snr: float = 1.0,\n) -> MultiEnvDGP:\n \"\"\"\n Create a multi-environment data generating process (DGP).\n\n Parameters\n ----------\n latent_dim: int\n Dimension of the latent variables.\n observation_dim: int\n Dimension of the observed variables.\n adjacency_matrix: np.ndarray, shape (latent_dim, latent_dim)\n Adjacency matrix of the latent SCM.\n intervention_targets_per_env: Tensor, shape (num_envs, latent_dim)\n Intervention targets per environment, with 1 indicating that the variable is intervened on\n and 0 indicating that the variable is not intervened on. This variable also implicitly defines\n the number of environments.\n shift_noise: bool\n Whether to shift the noise distribution for variables that are intervened on. Default: False.\n noise_shift_type: str\n Whether to shift the mean or standard deviation of the noise distribution for variables that are intervened on.\n Options: \"mean\" or \"std\". Default: \"mean\".\n mixing: str\n Mixing function. Options: \"linear\" or \"nonlinear\". Default: \"nonlinear\".\n scm: str\n Latent SCM. Options: \"linear\" or \"location-scale\". Default: \"linear\".\n n_nonlinearities: int\n Number of nonlinearities in the nonlinear mixing function. Default: 1.\n scm_coeffs_low: float\n Lower bound of the SCM coefficients in linear SCMs. Default: -1.\n scm_coeffs_high: float\n Upper bound of the SCM coefficients in linear SCMs. Default: 1.\n coeffs_min_abs_value: float\n Minimum absolute value of the SCM coefficients in linear SCMs. If None, no minimum absolute value is enforced.\n Default: None.\n edge_prob: float\n Probability of an edge in the adjacency matrix if no adjacency matrix is given. Default: None.\n snr: float\n Signal-to-noise ratio of the location-scale SCM. 
Default: 1.0.\n\n Returns\n -------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n \"\"\"\n if mixing == \"linear\":\n mixing_function = LinearMixing(\n latent_dim=latent_dim, observation_dim=observation_dim\n )\n elif mixing == \"nonlinear\":\n mixing_function = NonlinearMixing(\n latent_dim=latent_dim,\n observation_dim=observation_dim,\n n_nonlinearities=n_nonlinearities,\n )\n else:\n raise ValueError(f\"Unknown mixing function {mixing}\")\n\n # if adjacency_matrix is not given as numpy array, sample a random one\n if not isinstance(adjacency_matrix, np.ndarray):\n assert (\n edge_prob is not None\n ), \"edge_prob must be given if no adjacency_matrix is given\"\n adjacency_matrix = sample_random_dag(latent_dim, edge_prob)\n adjacency_matrix = adjacency_matrix\n\n if scm == \"linear\":\n latent_scm = LinearSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n coeffs_low=scm_coeffs_low,\n coeffs_high=scm_coeffs_high,\n coeffs_min_abs_value=coeffs_min_abs_value,\n )\n elif scm == \"location-scale\":\n latent_scm = LocationScaleSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n snr=snr,\n )\n else:\n raise ValueError(f\"Unknown SCM {scm}\")\n\n noise_generator = GaussianNoise(\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n shift=shift_noise,\n shift_type=noise_shift_type,\n )\n medgp = MultiEnvDGP(\n latent_scm=latent_scm,\n noise_generator=noise_generator,\n mixing_function=mixing_function,\n )\n return medgp" }, { "identifier": "LinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class LinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with linear unmixing function.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n fix_mechanisms: bool = True,\n nonparametric_base_distr: bool = False,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = LinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n intervention_targets_per_env=intervention_targets_per_env,\n fix_mechanisms=fix_mechanisms,\n nonparametric_base_distr=nonparametric_base_distr,\n )\n self.save_hyperparameters()" }, { "identifier": "NaiveNonlinearModel", "path": "model/cauca_model.py", "snippet": "class NaiveNonlinearModel(CauCAModel):\n \"\"\"\n Naive CauCA model with nonlinear unmixing function. 
It assumes no causal dependencies.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n intervention_targets_per_env: Optional[torch.Tensor] = None,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NaiveEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n )\n self.save_hyperparameters()" }, { "identifier": "NonlinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class NonlinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with nonlinear unmixing function.\n\n Additional attributes\n ---------------------\n k_flows : int\n Number of flows to use in the nonlinear unmixing function. Default: 1.\n net_hidden_dim : int\n Hidden dimension of the neural network used in the nonlinear unmixing function. Default: 128.\n net_hidden_layers : int\n Number of hidden layers of the neural network used in the nonlinear unmixing function. Default: 3.\n fix_mechanisms : bool\n Some mechanisms can be fixed to a simple gaussian distribution without loss of generality.\n This has only an effect for the parametric base distribution. If True, these mechanisms are fixed.\n Default: True.\n fix_all_intervention_targets : bool\n When fixable mechanisms are fixed, this parameter determines whether all intervention targets\n are fixed (option 1) or all intervention targets which are non-root nodes together with all\n non-intervened root nodes (option 2). See documentation of ParamMultiEnvCausalDistribution\n for more details. Default: False.\n nonparametric_base_distr : bool\n Whether to use a nonparametric base distribution for the flows. If false, a parametric linear\n gaussian causal base distribution is used. Default: False.\n K_cbn : int\n Number of flows to use in the nonlinear nonparametric base distribution. Default: 3.\n net_hidden_dim_cbn : int\n Hidden dimension of the neural network used in the nonlinear nonparametric base distribution. Default: 128.\n net_hidden_layers_cbn : int\n Number of hidden layers of the neural network used in the nonlinear nonparametric base distribution. 
Default: 3.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n fix_mechanisms: bool = True,\n fix_all_intervention_targets: bool = False,\n nonparametric_base_distr: bool = False,\n K_cbn: int = 3,\n net_hidden_dim_cbn: int = 128,\n net_hidden_layers_cbn: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NonlinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n fix_mechanisms=fix_mechanisms,\n fix_all_intervention_targets=fix_all_intervention_targets,\n nonparametric_base_distr=nonparametric_base_distr,\n K_cbn=K_cbn,\n net_hidden_dim_cbn=net_hidden_dim_cbn,\n net_hidden_layers_cbn=net_hidden_layers_cbn,\n )\n self.save_hyperparameters()" } ]
import argparse
import os
import pytorch_lightning as pl
from pathlib import Path
from pytorch_lightning.loggers import WandbLogger
from config import DGP
from data_generator import MultiEnvDataModule, make_multi_env_dgp
from model.cauca_model import LinearCauCAModel, NaiveNonlinearModel, NonlinearCauCAModel
12,283
) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="cauca", help="Weights & Biases project name.", ) args = parser.parse_args() if args.function_misspec: assert ( args.mixing == "nonlinear" and args.model == "linear" ), "Function not misspecified." if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
if __name__ == "__main__": parser = argparse.ArgumentParser( description="Run experiment for Causal Component Analysis (CauCA)." ) parser.add_argument( "--max-epochs", type=int, default=10, help="Number of epochs to train for.", ) parser.add_argument( "--accelerator", type=str, default="gpu", help="Accelerator to use for training.", ) parser.add_argument( "--batch-size", type=int, default=1024, help="Number of samples per batch.", ) parser.add_argument( "--lr", type=float, default=1e-4, help="Learning rate for Adam optimizer.", ) parser.add_argument( "--checkpoint-root-dir", type=str, default="checkpoints", help="Checkpoint root directory.", ) parser.add_argument( "--noise-shift-type", type=str, default="mean", choices=["mean", "std"], help="Property of noise distribution that is shifted between environments.", ) parser.add_argument( "--check-val-every-n-epoch", type=int, default=1, help="Check validation loss every n epochs.", ) parser.add_argument( "--dgp", type=str, default="graph-4-0", help="Data generation process to use.", ) parser.add_argument( "--k-flows", type=int, default=1, help="Number of flows to use in nonlinear ICA model.", ) parser.add_argument( "--k-flows-cbn", type=int, default=3, help="Number of flows to use in nonlinear latent CBN model.", ) parser.add_argument( "--model", type=str, default="nonlinear", help="Type of encoder to use.", choices=["linear", "nonlinear", "naive"], ) parser.add_argument( "--seed", type=int, default=42, ) parser.add_argument( "--training-seed", type=int, default=42, ) parser.add_argument( "--mixing", type=str, default="nonlinear", help="Type of mixing function to use.", choices=["linear", "nonlinear"], ) parser.add_argument( "--scm", type=str, default="linear", help="Type of SCM to use.", choices=["linear", "location-scale"], ) parser.add_argument( "--n-nonlinearities", type=int, default=1, help="Number of nonlinearities to use in nonlinear mixing function.", ) parser.add_argument( "--learn-scm-params", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to learn SCM parameters.", ) parser.add_argument( "--lr-scheduler", type=str, default=None, help="Learning rate scheduler.", choices=[None, "cosine"], ) parser.add_argument( "--lr-min", type=float, default=0.0, help="Minimum learning rate for cosine learning rate scheduler.", ) parser.add_argument( "--scm-coeffs-low", type=float, default=-1, help="Lower bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-high", type=float, default=1, help="Upper bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-min-abs-value", type=float, default=None, help="Minimum absolute value for SCM coefficients.", ) parser.add_argument( "--snr", type=float, default=1.0, help="Signal-to-noise ratio in latent SCM.", ) parser.add_argument( "--adjacency-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify adjacency matrix - assume ICA.", ) parser.add_argument( "--function-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify function class - assume linear.", ) parser.add_argument( "--net-hidden-layers", type=int, default=3, help="Number of hidden layers in nonlinear encoder.", ) parser.add_argument( "--net-hidden-layers-cbn", type=int, default=3, help="Number of hidden layers in latent CBN model.", ) parser.add_argument( "--net-hidden-dim", type=int, default=128, help="Number of hidden dimensions in nonlinear encoder.", ) parser.add_argument( "--net-hidden-dim-cbn", type=int, default=128, 
help="Number of hidden dimensions in latent CBN model.", ) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="cauca", help="Weights & Biases project name.", ) args = parser.parse_args() if args.function_misspec: assert ( args.mixing == "nonlinear" and args.model == "linear" ), "Function not misspecified." if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
model = NaiveNonlinearModel(
4
2023-10-25 09:25:26+00:00
16k
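One detail worth noting in the experiments/cauca/main.py record above: nearly every boolean flag is declared with type=bool together with action=argparse.BooleanOptionalAction, which (on Python 3.9+) auto-generates paired --flag / --no-flag options so a True default can be switched off from the command line. A minimal sketch of that pattern follows; the flag name and help text are taken from the record, while the toy parser and the example invocation are illustrative only.

import argparse

parser = argparse.ArgumentParser(description="Toy parser mirroring the flag style used in the record.")
parser.add_argument(
    "--fix-mechanisms",
    type=bool,                                   # the record combines type=bool with the action below
    default=True,
    action=argparse.BooleanOptionalAction,       # adds both --fix-mechanisms and --no-fix-mechanisms
    help="Fix fixable mechanisms in latents.",
)
# Passing --no-fix-mechanisms flips the default; passing nothing keeps it True.
args = parser.parse_args(["--no-fix-mechanisms"])
print(args.fix_mechanisms)  # -> False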
endo-yuki-t/MAG
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas)\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec\n \n @torch.no_grad()\n def forward_ddim(self, x_latent, cond, total_step=10, t0=1000, return_noised_maps=False):\n seq_inv = np.linspace(0, 1, total_step) * (t0-1)\n seq_inv = [int(s) for s in list(seq_inv)]\n seq_inv_next = [-1] + list(seq_inv[:-1])\n x_enc = x_latent.clone()\n x_encs = []\n with tqdm(total=len(seq_inv), desc=f\"Inversion process \", ascii=True) as progress_bar:\n for it, (i, j) in enumerate(zip((seq_inv_next[1:]), (seq_inv[1:]))):\n if return_noised_maps:\n x_encs.append(x_enc)\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_prev = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_enc, _ = denoising_step(x_enc, c=cond, t=t, t_next=t_prev, model=self.model, b=self.model.betas, eta=0)\n progress_bar.update(1)\n \n if return_noised_maps:\n return x_enc, x_encs\n \n return x_enc\n \n @torch.no_grad()\n def reverse_ddim(self, x_latent, cond, total_step=10, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, noised_maps=False, mask=False, merge_stop_th=10):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n x_dec = x_latent.clone()\n step=len(seq_test)-1\n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_dec, x_0 = denoising_step(x_dec, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning) \n if noised_maps is not False:\n step -= 1\n if step>merge_stop_th:\n x_dec = mask*x_dec+(1.-mask)*noised_maps[step]\n progress_bar.update(1)\n return x_dec\n \n def attention_guided_reverse_ddim(self, x_latent, cond, total_step=50, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, att_masks=None, word_ids_for_mask=None, alpha=0.08, lmbd=0.5, swapping_step_th=float('inf'), guidance_step_th=float('inf')):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n step=len(seq_test)-1\n optimized_latent = x_latent.clone().detach()\n \n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n \n if t[0]>guidance_step_th:\n optimized_latent.requires_grad = True\n opt = torch.optim.SGD([optimized_latent], lr=alpha)\n _ = self.model.apply_model(optimized_latent, t.detach(), cond.detach())\n \n loss = 0.\n for name, module in self.model.named_modules():\n module_name = type(module).__name__\n if module_name == \"MemoryEfficientCrossAttention\" and 'attn2' in name:\n att = module.stored_attention\n 
w = int(math.sqrt(att.shape[1]))\n for amid, att_mask in enumerate(att_masks):\n if amid >= len(word_ids_for_mask):\n continue\n att_mask = att_mask.detach()\n att_mask_resized = torch.nn.functional.interpolate(att_mask, size=(w,w)).to(torch.bool)\n att_mask_ = rearrange(att_mask_resized, 'b ... -> b (...)')\n att_mask_ = repeat(att_mask_, 'b j -> (b h) j', h=module.heads)\n \n word_ids = word_ids_for_mask[amid]\n loss += -att[:,:,word_ids][att_mask_==1].sum()\n loss += lmbd*att[:,:,word_ids][att_mask_==0].sum()\n \n #print(\"Masked attention loss:\", loss)\n loss.backward(retain_graph=False)\n opt.step()\n \n with torch.no_grad():\n if t[0]>swapping_step_th:\n att_masks_att_ids = [att_masks,word_ids_for_mask]\n else:\n att_masks_att_ids = None\n x_dec, x_0 = denoising_step(optimized_latent, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning,\n att_mask=att_masks_att_ids)\n \n optimized_latent = x_dec.detach().clone() \n step -= 1\n progress_bar.update(1)\n \n return optimized_latent" } ]
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
11,199
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
self.model_ema = LitEma(self.model)
8
2023-10-27 06:56:37+00:00
16k
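The record above (ending with next_line `self.model_ema = LitEma(self.model)`, gold_snippet_index 8, level 16k) can be read as one next-line completion example. Below is a minimal sketch of how such a record might be assembled into a prompt and scored, assuming the fields are loaded into a plain Python dict named `record`; the concatenation order and the helper names (`build_prompt`, `is_exact_match`) are illustrative assumptions, not part of the dataset specification.

# Illustrative sketch only: turn one record into a next-line completion example.
# Assumes `record` is a dict holding the fields shown above; the exact prompt
# layout is an assumption, not the dataset's prescribed format.

def build_prompt(record: dict) -> str:
    # Cross-file context snippets retrieved for this example, in listed order.
    context = "\n\n".join(item["snippet"] for item in record["context"])
    # In-file prefix: the import block followed by the cropped code.
    prefix = record["import_statement"] + "\n\n" + record["cropped_code"]
    return context + "\n\n" + prefix

def is_exact_match(prediction: str, record: dict) -> bool:
    # The ground-truth completion is the single line stored in `next_line`.
    return prediction.strip() == record["next_line"].strip()

# Example usage with heavily abbreviated field values:
record = {
    "context": [{"identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module): ..."}],
    "import_statement": "import torch\nfrom ldm.modules.ema import LitEma",
    "cropped_code": "class DDPM(pl.LightningModule):\n    ...\n        if self.use_ema:",
    "next_line": "self.model_ema = LitEma(self.model)",
}
prompt = build_prompt(record)
print(is_exact_match("self.model_ema = LitEma(self.model)", record))  # True

In practice the assembled prompt would also be truncated to the target context window (the token_num and level fields presumably record the example's size bucket), but that bookkeeping is omitted from the sketch.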
Gene-Weaver/VoucherVision
vouchervision/VoucherVision_GUI.py
[ { "identifier": "write_config_file", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def write_config_file(config_data, dir_home, filename=\"LeafMachine2.yaml\"):\n file_path = os.path.join(dir_home, filename)\n\n # Write the data to a YAML file\n with open(file_path, \"w\") as outfile:\n yaml.dump(config_data, outfile, default_flow_style=False)" }, { "identifier": "build_VV_config", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def build_VV_config():\n #############################################\n ############ Set common defaults ############\n #############################################\n # Changing the values below will set new \n # default values each time you open the \n # VoucherVision user interface\n #############################################\n #############################################\n #############################################\n\n dir_home = os.path.dirname(os.path.dirname(__file__))\n run_name = 'test'\n # dir_images_local = 'D:/Dropbox/LM2_Env/Image_Datasets/GBIF_BroadSample_3SppPerFamily1'\n dir_images_local = os.path.join(dir_home,'demo','demo_images')\n \n # The default output location is the computer's \"Downloads\" folder\n # You can set dir_output directly by typing the folder path,\n # OR you can uncomment the line \"dir_output = default_output_folder\" \n # to have VoucherVision save to the Downloads folder by default\n default_output_folder = get_default_download_folder()\n dir_output = default_output_folder\n # dir_output = 'D:/D_Desktop/LM2'\n\n prefix_removal = '' #'MICH-V-'\n suffix_removal = ''\n catalog_numerical_only = False\n\n LLM_version_user = 'Azure GPT 4'\n prompt_version = 'Version 2' # from [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n use_LeafMachine2_collage_images = False # Use LeafMachine2 collage images\n do_create_OCR_helper_image = False\n\n batch_size = 500\n\n path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')\n embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]\n\n #############################################\n #############################################\n ########## DO NOT EDIT BELOW HERE ###########\n #############################################\n #############################################\n return assemble_config(dir_home, run_name, dir_images_local,dir_output,\n prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size,\n path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,\n prompt_version, do_create_OCR_helper_image, use_domain_knowledge=False)" }, { "identifier": "run_demo_tests_GPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_GPT(progress_report):\n dir_home, path_to_configs, test_results = build_demo_tests('gpt')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsGPT.get_options()\n \n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n\n if opt1_readable in [\"Azure GPT 4\", \"Azure GPT 3.5\"]:\n api_version = 'gpt-azure'\n elif opt1_readable in [\"GPT 4\", \"GPT 3.5\"]:\n api_version = 'gpt'\n else:\n raise\n\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt 
{OPT3[int(ind_opt3.split('-')[1])]}\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr'):\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n JSON_results[ind] = None\n test_results[cfg] = False\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No API key found for {fail_response}\")\n \n return test_results, JSON_results" }, { "identifier": "run_demo_tests_Palm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_Palm(progress_report):\n api_version = 'palm'\n\n dir_home, path_to_configs, test_results = build_demo_tests('palm')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsPalm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, path_custom_prompts=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n test_results[cfg] = False\n JSON_results[ind] = None\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No 
API key found for {fail_response}\")\n\n return test_results, JSON_results" }, { "identifier": "TestOptionsGPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsGPT:\n OPT1 = [\"gpt-4-1106-preview\",\"GPT 4\", \"GPT 3.5\", \"Azure GPT 4\", \"Azure GPT 3.5\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 24" }, { "identifier": "TestOptionsPalm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsPalm:\n OPT1 = [\"PaLM 2\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1 PaLM 2\", \"Version 1 PaLM 2 No Domain Knowledge\", \"Version 2 PaLM 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 6" }, { "identifier": "check_if_usable", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def check_if_usable():\n dir_home = os.path.dirname(os.path.dirname(__file__))\n path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml')\n cfg_private = get_cfg_from_full_path(path_cfg_private)\n\n has_key_openai = has_API_key(cfg_private['openai']['OPENAI_API_KEY'])\n\n has_key_azure_openai = has_API_key(cfg_private['openai_azure']['api_version']) \n\n has_key_palm2 = has_API_key(cfg_private['google_palm']['google_palm_api'])\n \n has_key_google_OCR = has_API_key(cfg_private['google_cloud']['path_json_file'])\n\n if has_key_google_OCR and (has_key_azure_openai or has_key_openai or has_key_palm2):\n return True\n else:\n return False" }, { "identifier": "run_api_tests", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_api_tests(api):\n try:\n dir_home, path_to_configs, test_results = build_api_tests(api)\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n if api == 'openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options()\n elif 'azure_openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options()\n elif 'palm':\n OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, None,path_custom_prompts=None , cfg_test=None, progress_report=None, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n return True\n\n except Exception as e:\n print(e)\n return False\n else:\n return False\n except Exception as e:\n print(e)\n return False" }, { "identifier": "voucher_vision", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision(cfg_file_path, dir_home, path_custom_prompts, cfg_test, 
progress_report, path_api_cost=None, test_ind = None, is_real_run=False):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n t_overall = perf_counter()\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n if is_real_run:\n progress_report.update_overall(f\"Creating Output Directory Structure\")\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n if is_real_run:\n progress_report.update_overall(f\"Fetching LeafMachine2 Files\")\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs, is_real_run, progress_report)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, path_custom_prompts, Project, Dirs)\n n_images = len(Voucher_Vision.img_paths)\n last_JSON_response, total_tokens_in, total_tokens_out = Voucher_Vision.process_specimen_batch(progress_report, is_real_run)\n \n if path_api_cost:\n cost_summary, data, total_cost = save_token_info_as_csv(Dirs, cfg['leafmachine']['LLM_version'], path_api_cost, total_tokens_in, total_tokens_out, n_images)\n add_to_expense_report(dir_home, data)\n logger.info(cost_summary)\n else:\n total_cost = None #TODO add config tests to expense_report\n\n t_overall_s = perf_counter()\n logger.name = 'Run Complete! :)'\n logger.info(f\"[Total elapsed time] {round((t_overall_s - t_overall)/60)} minutes\")\n space_saver(cfg, Dirs, logger)\n\n if is_real_run:\n progress_report.update_overall(f\"Run Complete! 
:sunglasses:\")\n\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n return last_JSON_response, total_cost" }, { "identifier": "voucher_vision_OCR_test", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision_OCR_test(cfg_file_path, dir_home, cfg_test, path_to_crop):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, None, Project, Dirs)\n last_JSON_response = Voucher_Vision.process_specimen_batch_OCR_test(path_to_crop)" }, { "identifier": "test_GPU", "path": "vouchervision/general_utils.py", "snippet": "def test_GPU():\n info = []\n success = False\n\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n info.append(f\"Number of GPUs: {num_gpus}\")\n\n for i in range(num_gpus):\n gpu = torch.cuda.get_device_properties(i)\n info.append(f\"GPU {i}: {gpu.name}\")\n\n success = True\n else:\n info.append(\"No GPU found!\")\n info.append(\"LeafMachine2 image cropping and embedding search will be slow or not possible.\")\n\n return success, info" }, { "identifier": "get_cfg_from_full_path", "path": "vouchervision/general_utils.py", "snippet": "def get_cfg_from_full_path(path_cfg):\n with open(path_cfg, \"r\") as ymlfile:\n cfg = yaml.full_load(ymlfile)\n return cfg" }, { "identifier": "summarize_expense_report", "path": "vouchervision/general_utils.py", "snippet": "def summarize_expense_report(path_expense_report):\n # Initialize counters and sums\n run_count = 0\n total_cost_sum = 0\n tokens_in_sum = 0\n tokens_out_sum = 0\n rate_in_sum = 0\n rate_out_sum = 0\n cost_in_sum = 0\n cost_out_sum = 0\n n_images_sum = 0\n api_version_counts = Counter()\n\n # Try to read the CSV file into a 
DataFrame\n try:\n df = pd.read_csv(path_expense_report)\n\n # Process each row in the DataFrame\n for index, row in df.iterrows():\n run_count += 1\n total_cost_sum += row['total_cost']\n tokens_in_sum += row['tokens_in']\n tokens_out_sum += row['tokens_out']\n rate_in_sum += row['rate_in']\n rate_out_sum += row['rate_out']\n cost_in_sum += row['cost_in']\n cost_out_sum += row['cost_out']\n n_images_sum += row['n_images']\n api_version_counts[row['api_version']] += 1\n\n except FileNotFoundError:\n print(f\"The file {path_expense_report} does not exist.\")\n return None\n\n # Calculate API version percentages\n api_version_percentages = {version: (count / run_count) * 100 for version, count in api_version_counts.items()}\n\n # Calculate cost per image for each API version\n cost_per_image_dict = {}\n for version, count in api_version_counts.items():\n total_cost = df[df['api_version'] == version]['total_cost'].sum()\n n_images = df[df['api_version'] == version]['n_images'].sum()\n cost_per_image = total_cost / n_images if n_images > 0 else 0\n cost_per_image_dict[version] = cost_per_image\n\n # Return the DataFrame and all summaries\n return {\n 'run_count': run_count,\n 'total_cost_sum': total_cost_sum,\n 'tokens_in_sum': tokens_in_sum,\n 'tokens_out_sum': tokens_out_sum,\n 'rate_in_sum': rate_in_sum,\n 'rate_out_sum': rate_out_sum,\n 'cost_in_sum': cost_in_sum,\n 'cost_out_sum': cost_out_sum,\n 'n_images_sum':n_images_sum,\n 'api_version_percentages': api_version_percentages,\n 'cost_per_image': cost_per_image_dict\n }, df" }, { "identifier": "create_google_ocr_yaml_config", "path": "vouchervision/general_utils.py", "snippet": "def create_google_ocr_yaml_config(output_file, dir_images_local, dir_output):\n # Define the configuration dictionary\n config = {\n 'leafmachine': {\n 'LLM_version': 'PaLM 2',\n 'archival_component_detector': {\n 'detector_iteration': 'PREP_final',\n 'detector_type': 'Archival_Detector',\n 'detector_version': 'PREP_final',\n 'detector_weights': 'best.pt',\n 'do_save_prediction_overlay_images': True,\n 'ignore_objects_for_overlay': [],\n 'minimum_confidence_threshold': 0.5\n },\n 'cropped_components': {\n 'binarize_labels': False,\n 'binarize_labels_skeletonize': False,\n 'do_save_cropped_annotations': True,\n 'save_cropped_annotations': ['label', 'barcode'],\n 'save_per_annotation_class': True,\n 'save_per_image': False\n },\n 'data': {\n 'do_apply_conversion_factor': False,\n 'include_darwin_core_data_from_combined_file': False,\n 'save_individual_csv_files_landmarks': False,\n 'save_individual_csv_files_measurements': False,\n 'save_individual_csv_files_rulers': False,\n 'save_individual_efd_files': False,\n 'save_json_measurements': False,\n 'save_json_rulers': False\n },\n 'do': {\n 'check_for_corrupt_images_make_vertical': True,\n 'check_for_illegal_filenames': False\n },\n 'logging': {\n 'log_level': None\n },\n 'modules': {\n 'specimen_crop': True\n },\n 'overlay': {\n 'alpha_transparency_archival': 0.3,\n 'alpha_transparency_plant': 0,\n 'alpha_transparency_seg_partial_leaf': 0.3,\n 'alpha_transparency_seg_whole_leaf': 0.4,\n 'ignore_archival_detections_classes': [],\n 'ignore_landmark_classes': [],\n 'ignore_plant_detections_classes': ['leaf_whole', 'specimen'],\n 'line_width_archival': 12,\n 'line_width_efd': 12,\n 'line_width_plant': 12,\n 'line_width_seg': 12,\n 'overlay_background_color': 'black',\n 'overlay_dpi': 300,\n 'save_overlay_to_jpgs': True,\n 'save_overlay_to_pdf': False,\n 'show_archival_detections': True,\n 'show_landmarks': True,\n 
'show_plant_detections': True,\n 'show_segmentations': True\n },\n 'print': {\n 'optional_warnings': True,\n 'verbose': True\n },\n 'project': {\n 'batch_size': 500,\n 'build_new_embeddings_database': False,\n 'catalog_numerical_only': False,\n 'continue_run_from_partial_xlsx': '',\n 'delete_all_temps': False,\n 'delete_temps_keep_VVE': False,\n 'dir_images_local': dir_images_local,\n 'dir_output': dir_output,\n 'embeddings_database_name': 'SLTP_UM_AllAsiaMinimalInRegion',\n 'image_location': 'local',\n 'num_workers': 1,\n 'path_to_domain_knowledge_xlsx': '',\n 'prefix_removal': '',\n 'prompt_version': 'Version 2 PaLM 2',\n 'run_name': 'google_vision_ocr_test',\n 'suffix_removal': '',\n 'use_domain_knowledge': False\n },\n 'use_RGB_label_images': False\n }\n }\n # Generate the YAML string from the data structure\n validate_dir(os.path.dirname(output_file))\n yaml_str = yaml.dump(config, sort_keys=False)\n\n # Write the YAML string to a file\n with open(output_file, 'w') as file:\n file.write(yaml_str)" }, { "identifier": "validate_dir", "path": "vouchervision/general_utils.py", "snippet": "def validate_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, exist_ok=True)" } ]
import streamlit as st
import yaml, os, json, random, time, re
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from itertools import chain
from PIL import Image
from typing import Union
from streamlit_extras.let_it_rain import rain
from vouchervision.LeafMachine2_Config_Builder import write_config_file
from vouchervision.VoucherVision_Config_Builder import build_VV_config, run_demo_tests_GPT, run_demo_tests_Palm , TestOptionsGPT, TestOptionsPalm, check_if_usable, run_api_tests
from vouchervision.vouchervision_main import voucher_vision, voucher_vision_OCR_test
from vouchervision.general_utils import test_GPU, get_cfg_from_full_path, summarize_expense_report, create_google_ocr_yaml_config, validate_dir
10,839
if st.session_state.config['leafmachine']['project']['prompt_version'] in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", True, disabled=True) else: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", False, disabled=True) st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per 
image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')):
PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1","Version 1 PaLM 2"] COLORS_EXPENSE_REPORT = { 'GPT_4': '#8fff66', # Bright Green 'GPT_3_5': '#006400', # Dark Green 'PALM2': '#66a8ff' # blue } class ProgressReport: def __init__(self, overall_bar, batch_bar, text_overall, text_batch): self.overall_bar = overall_bar self.batch_bar = batch_bar self.text_overall = text_overall self.text_batch = text_batch self.current_overall_step = 0 self.total_overall_steps = 20 # number of major steps in machine function self.current_batch = 0 self.total_batches = 20 def update_overall(self, step_name=""): self.current_overall_step += 1 self.overall_bar.progress(self.current_overall_step / self.total_overall_steps) self.text_overall.text(step_name) def update_batch(self, step_name=""): self.current_batch += 1 self.batch_bar.progress(self.current_batch / self.total_batches) self.text_batch.text(step_name) def set_n_batches(self, n_batches): self.total_batches = n_batches def set_n_overall(self, total_overall_steps): self.current_overall_step = 0 self.overall_bar.progress(0) self.total_overall_steps = total_overall_steps def reset_batch(self, step_name): self.current_batch = 0 self.batch_bar.progress(0) self.text_batch.text(step_name) def reset_overall(self, step_name): self.current_overall_step = 0 self.overall_bar.progress(0) self.text_overall.text(step_name) def get_n_images(self): return self.n_images def get_n_overall(self): return self.total_overall_steps def does_private_file_exist(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return os.path.exists(path_cfg_private) def setup_streamlit_config(dir_home): # Define the directory path and filename dir_path = os.path.join(dir_home, ".streamlit") file_path = os.path.join(dir_path, "config.toml") # Check if directory exists, if not create it if not os.path.exists(dir_path): os.makedirs(dir_path) # Create or modify the file with the provided content config_content = f""" [theme] base = "dark" primaryColor = "#00ff00" [server] enableStaticServing = false runOnSave = true port = 8524 """ with open(file_path, "w") as f: f.write(config_content.strip()) def display_scrollable_results(JSON_results, test_results, OPT2, OPT3): """ Display the results from JSON_results in a scrollable container. 
""" # Initialize the container con_results = st.empty() with con_results.container(): # Start the custom container for all the results results_html = """<div class='scrollable-results-container'>""" for idx, (test_name, _) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if JSON_results[idx] is None: results_html += f"<p>None</p>" else: formatted_json = json.dumps(JSON_results[idx], indent=4, sort_keys=False) results_html += f"<pre>[{opt2_readable}] + [{opt3_readable}]<br/>{formatted_json}</pre>" # End the custom container results_html += """</div>""" # The CSS to make this container scrollable css = """ <style> .scrollable-results-container { overflow-y: auto; height: 600px; width: 100%; white-space: pre-wrap; # To wrap the content font-family: monospace; # To give the JSON a code-like appearance } </style> """ # Apply the CSS and then the results st.markdown(css, unsafe_allow_html=True) st.markdown(results_html, unsafe_allow_html=True) def refresh(): st.write('') def display_test_results(test_results, JSON_results, llm_version): if llm_version == 'gpt': OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() elif llm_version == 'palm': OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() else: raise widths = [1] * (len(OPT1) + 2) + [2] columns = st.columns(widths) with columns[0]: st.write("LeafMachine2") with columns[1]: st.write("Prompt") with columns[len(OPT1) + 2]: st.write("Scroll to See Last Transcription in Each Test") already_written = set() for test_name, result in sorted(test_results.items()): _, ind_opt1, _, _ = test_name.split('__') option_value = OPT1[int(ind_opt1.split('-')[1])] if option_value not in already_written: with columns[int(ind_opt1.split('-')[1]) + 2]: st.write(option_value) already_written.add(option_value) printed_options = set() with columns[-1]: display_scrollable_results(JSON_results, test_results, OPT2, OPT3) # Close the custom container st.write('</div>', unsafe_allow_html=True) for idx, (test_name, result) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if (opt2_readable, opt3_readable) not in printed_options: with columns[0]: st.info(f"{opt2_readable}") st.write('---') with columns[1]: st.info(f"{opt3_readable}") st.write('---') printed_options.add((opt2_readable, opt3_readable)) with columns[int(ind_opt1.split('-')[1]) + 2]: if result: st.success(f"Test Passed") else: st.error(f"Test Failed") st.write('---') # success_count = sum(1 for result in test_results.values() if result) # failure_count = len(test_results) - success_count # proportional_rain("🥇", success_count, "💔", failure_count, font_size=72, falling_speed=5, animation_length="infinite") rain_emojis(test_results) def add_emoji_delay(): time.sleep(0.3) def rain_emojis(test_results): # test_results = { # 'test1': True, # Test passed # 'test2': True, # Test passed # 'test3': True, # Test passed # 'test4': False, # Test failed # 'test5': False, # Test failed # 'test6': False, # Test failed # 'test7': False, # Test failed # 'test8': False, # Test failed # 'test9': False, # Test failed # 'test10': False, # Test failed # } success_emojis = ["🥇", "🏆", "🍾", "🙌"] failure_emojis = ["💔", "😭"] success_count = 
sum(1 for result in test_results.values() if result) failure_count = len(test_results) - success_count chosen_emoji = random.choice(success_emojis) for _ in range(success_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=4, animation_length=2, ) add_emoji_delay() chosen_emoji = random.choice(failure_emojis) for _ in range(failure_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=5, animation_length=1, ) add_emoji_delay() def get_prompt_versions(LLM_version): yaml_files = [f for f in os.listdir(os.path.join(st.session_state.dir_home, 'custom_prompts')) if f.endswith('.yaml')] if LLM_version in ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"]: versions = ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] return (versions + yaml_files, "Version 2") elif LLM_version in ["PaLM 2",]: versions = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] return (versions + yaml_files, "Version 2 PaLM 2") else: # Handle other cases or raise an error return (yaml_files, None) def get_private_file(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return get_cfg_from_full_path(path_cfg_private) def create_space_saver(): st.subheader("Space Saving Options") col_ss_1, col_ss_2 = st.columns([2,2]) with col_ss_1: st.write("Several folders are created and populated with data during the VoucherVision transcription process.") st.write("Below are several options that will allow you to automatically delete temporary files that you may not need for everyday operations.") st.write("VoucherVision creates the following folders. Folders marked with a :star: are required if you want to use VoucherVisionEditor for quality control.") st.write("`../[Run Name]/Archival_Components`") st.write("`../[Run Name]/Config_File`") st.write("`../[Run Name]/Cropped_Images` :star:") st.write("`../[Run Name]/Logs`") st.write("`../[Run Name]/Original_Images` :star:") st.write("`../[Run Name]/Transcription` :star:") with col_ss_2: st.session_state.config['leafmachine']['project']['delete_temps_keep_VVE'] = st.checkbox("Delete Temporary Files (KEEP files required for VoucherVisionEditor)", st.session_state.config['leafmachine']['project'].get('delete_temps_keep_VVE', False)) st.session_state.config['leafmachine']['project']['delete_all_temps'] = st.checkbox("Keep only the final transcription file", st.session_state.config['leafmachine']['project'].get('delete_all_temps', False),help="*WARNING:* This limits your ability to do quality assurance. 
This will delete all folders created by VoucherVision, leaving only the `transcription.xlsx` file.") # def create_private_file(): # st.session_state.proceed_to_main = False # if st.session_state.private_file: # cfg_private = get_private_file() # create_private_file_0(cfg_private) # else: # st.title("VoucherVision") # create_private_file_0() def create_private_file(): st.session_state.proceed_to_main = False st.title("VoucherVision") col_private,_= st.columns([12,2]) if st.session_state.private_file: cfg_private = get_private_file() else: cfg_private = {} cfg_private['openai'] = {} cfg_private['openai']['OPENAI_API_KEY'] ='' cfg_private['openai_azure'] = {} cfg_private['openai_azure']['openai_api_key'] = '' cfg_private['openai_azure']['api_version'] = '' cfg_private['openai_azure']['openai_api_base'] ='' cfg_private['openai_azure']['openai_organization'] ='' cfg_private['openai_azure']['openai_api_type'] ='' cfg_private['google_cloud'] = {} cfg_private['google_cloud']['path_json_file'] ='' cfg_private['google_palm'] = {} cfg_private['google_palm']['google_palm_api'] ='' with col_private: st.header("Set API keys") st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.warning("To commit changes to API keys you must press the 'Set API Keys' button at the bottom of the page.") st.write("Before using VoucherVision you must set your API keys. All keys are stored locally on your computer and are never made public.") st.write("API keys are stored in `../VoucherVision/PRIVATE_DATA.yaml`.") st.write("Deleting this file will allow you to reset API keys. Alternatively, you can edit the keys in the user interface.") st.write("Leave keys blank if you do not intend to use that service.") st.write("---") st.subheader("Google Vision (*Required*)") st.markdown("VoucherVision currently uses [Google Vision API](https://cloud.google.com/vision/docs/ocr) for OCR. Generating an API key for this is more involved than the others. [Please carefully follow the instructions outlined here to create and setup your account.](https://cloud.google.com/vision/docs/setup) ") st.markdown(""" Once your account is created, [visit this page](https://console.cloud.google.com) and create a project. Then follow these instructions: - **Select your Project**: If you have multiple projects, ensure you select the one where you've enabled the Vision API. - **Open the Navigation Menu**: Click on the hamburger menu (three horizontal lines) in the top left corner. - **Go to IAM & Admin**: In the navigation pane, hover over "IAM & Admin" and then click on "Service accounts." - **Locate Your Service Account**: Find the service account for which you wish to download the JSON key. If you haven't created a service account yet, you'll need to do so by clicking the "CREATE SERVICE ACCOUNT" button at the top. - **Download the JSON Key**: - Click on the three dots (actions menu) on the right side of your service account name. - Select "Manage keys." - In the pop-up window, click on the "ADD KEY" button and select "JSON." - The JSON key file will automatically be downloaded to your computer. - **Store Safely**: This file contains sensitive data that can be used to authenticate and bill your Google Cloud account. Never commit it to public repositories or expose it in any way. Always keep it safe and secure. 
""") with st.container(): c_in_ocr, c_button_ocr = st.columns([10,2]) with c_in_ocr: google_vision = st.text_input(label = 'Full path to Google Cloud JSON API key file', value = cfg_private['google_cloud'].get('path_json_file', ''), placeholder = 'e.g. C:/Documents/Secret_Files/google_API/application_default_credentials.json', help ="This API Key is in the form of a JSON file. Please save the JSON file in a safe directory. DO NOT store the JSON key inside of the VoucherVision directory.", type='password',key='924857298734590283750932809238') with c_button_ocr: st.empty() st.write("---") st.subheader("OpenAI") st.markdown("API key for first-party OpenAI API. Create an account with OpenAI [here](https://platform.openai.com/signup), then create an API key [here](https://platform.openai.com/account/api-keys).") with st.container(): c_in_openai, c_button_openai = st.columns([10,2]) with c_in_openai: openai_api_key = st.text_input("openai_api_key", cfg_private['openai'].get('OPENAI_API_KEY', ''), help='The actual API key. Likely to be a string of 2 character, a dash, and then a 48-character string: sk-XXXXXXXX...', placeholder = 'e.g. sk-XXXXXXXX...', type='password') with c_button_openai: st.empty() st.write("---") st.subheader("OpenAI - Azure") st.markdown("This version OpenAI relies on Azure servers directly as is intended for private enterprise instances of OpenAI's services, such as [UM-GPT](https://its.umich.edu/computing/ai). Administrators will provide you with the following information.") azure_openai_api_version = st.text_input("azure_openai_api_version", cfg_private['openai_azure'].get('api_version', ''), help='API Version e.g. "2023-05-15"', placeholder = 'e.g. 2023-05-15', type='password') azure_openai_api_key = st.text_input("azure_openai_api_key", cfg_private['openai_azure'].get('openai_api_key', ''), help='The actual API key. Likely to be a 32-character string', placeholder = 'e.g. 12333333333333333333333333333332', type='password') azure_openai_api_base = st.text_input("azure_openai_api_base", cfg_private['openai_azure'].get('openai_api_base', ''), help='The base url for the API e.g. "https://api.umgpt.umich.edu/azure-openai-api"', placeholder = 'e.g. https://api.umgpt.umich.edu/azure-openai-api', type='password') azure_openai_organization = st.text_input("azure_openai_organization", cfg_private['openai_azure'].get('openai_organization', ''), help='Your organization code. Likely a short string', placeholder = 'e.g. 123456', type='password') azure_openai_api_type = st.text_input("azure_openai_api_type", cfg_private['openai_azure'].get('openai_api_type', ''), help='The API type. Typically "azure"', placeholder = 'e.g. azure', type='password') with st.container(): c_in_azure, c_button_azure = st.columns([10,2]) with c_button_azure: st.empty() st.write("---") st.subheader("Google PaLM 2") st.markdown('Follow these [instructions](https://developers.generativeai.google/tutorials/setup) to generate an API key for PaLM 2. You may need to also activate an account with [MakerSuite](https://makersuite.google.com/app/apikey) and enable "early access."') with st.container(): c_in_palm, c_button_palm = st.columns([10,2]) with c_in_palm: google_palm = st.text_input("Google PaLM 2 API Key", cfg_private['google_palm'].get('google_palm_api', ''), help='The MakerSuite API key e.g. a 32-character string', placeholder='e.g. 
SATgthsykuE64FgrrrrEervr3S4455t_geyDeGq', type='password') with st.container(): with c_button_ocr: st.write("##") st.button("Test OCR", on_click=test_API, args=['google_vision',c_in_ocr, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_openai: st.write("##") st.button("Test OpenAI", on_click=test_API, args=['openai',c_in_openai, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_azure: st.write("##") st.button("Test Azure OpenAI", on_click=test_API, args=['azure_openai',c_in_azure, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_palm: st.write("##") st.button("Test PaLM 2", on_click=test_API, args=['palm',c_in_palm, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) st.button("Set API Keys",type='primary', on_click=save_changes_to_API_keys, args=[cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) if st.button('Proceed to VoucherVision'): st.session_state.proceed_to_private = False st.session_state.proceed_to_main = True def test_API(api, message_loc, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Save the API keys save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key,azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm) with st.spinner('Performing validation checks...'): if api == 'google_vision': print("*** Google Vision OCR API Key ***") try: demo_config_path = os.path.join(st.session_state.dir_home,'demo','validation_configs','google_vision_ocr_test.yaml') demo_images_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_images') demo_out_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_output','run_name') create_google_ocr_yaml_config(demo_config_path, demo_images_path, demo_out_path) voucher_vision_OCR_test(demo_config_path, st.session_state.dir_home, None, demo_images_path) with message_loc: st.success("Google Vision OCR API Key Valid :white_check_mark:") return True except Exception as e: with message_loc: st.error(f"Google Vision OCR API Key Failed! 
{e}") return False elif api == 'openai': print("*** OpenAI API Key ***") try: if run_api_tests('openai'): with message_loc: st.success("OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error("OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"OpenAI API Key Failed:exclamation: {e}") elif api == 'azure_openai': print("*** Azure OpenAI API Key ***") try: if run_api_tests('azure_openai'): with message_loc: st.success("Azure OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation: {e}") elif api == 'palm': print("*** Google PaLM 2 API Key ***") try: if run_api_tests('palm'): with message_loc: st.success("Google PaLM 2 API Key Valid :white_check_mark:") else: with message_loc: st.error("Google PaLM 2 API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Google PaLM 2 API Key Failed:exclamation: {e}") def save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Update the configuration dictionary with the new values cfg_private['openai']['OPENAI_API_KEY'] = openai_api_key cfg_private['openai_azure']['api_version'] = azure_openai_api_version cfg_private['openai_azure']['openai_api_key'] = azure_openai_api_key cfg_private['openai_azure']['openai_api_base'] = azure_openai_api_base cfg_private['openai_azure']['openai_organization'] = azure_openai_organization cfg_private['openai_azure']['openai_api_type'] = azure_openai_api_type cfg_private['google_cloud']['path_json_file'] = google_vision cfg_private['google_palm']['google_palm_api'] = google_palm # Call the function to write the updated configuration to the YAML file write_config_file(cfg_private, st.session_state.dir_home, filename="PRIVATE_DATA.yaml") st.session_state.private_file = does_private_file_exist() # Function to load a YAML file and update session_state def load_prompt_yaml(filename): with open(filename, 'r') as file: st.session_state['prompt_info'] = yaml.safe_load(file) st.session_state['prompt_author'] = st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author']) st.session_state['prompt_author_institution'] = st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution']) st.session_state['prompt_description'] = st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description']) st.session_state['instructions'] = st.session_state['prompt_info'].get('instructions', st.session_state['default_instructions']) st.session_state['json_formatting_instructions'] = st.session_state['prompt_info'].get('json_formatting_instructions', st.session_state['default_json_formatting_instructions'] ) st.session_state['rules'] = st.session_state['prompt_info'].get('rules', {}) st.session_state['mapping'] = st.session_state['prompt_info'].get('mapping', {}) st.session_state['LLM'] = st.session_state['prompt_info'].get('LLM', 'gpt') # Placeholder: st.session_state['assigned_columns'] = list(chain.from_iterable(st.session_state['mapping'].values())) def save_prompt_yaml(filename): yaml_content = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': 
st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') filepath = os.path.join(dir_prompt, f"{filename}.yaml") with open(filepath, 'w') as file: yaml.safe_dump(dict(yaml_content), file, sort_keys=False) st.success(f"Prompt saved as '{filename}.yaml'.") def check_unique_mapping_assignments(): if len(st.session_state['assigned_columns']) != len(set(st.session_state['assigned_columns'])): st.error("Each column name must be assigned to only one category.") return False else: st.success("Mapping confirmed.") return True def check_prompt_yaml_filename(fname): # Check if the filename only contains letters, numbers, underscores, and dashes pattern = r'^[\w-]+$' # The \w matches any alphanumeric character and is equivalent to the character class [a-zA-Z0-9_]. # The hyphen - is literally matched. if re.match(pattern, fname): return True else: return False def btn_load_prompt(selected_yaml_file, dir_prompt): if selected_yaml_file: yaml_file_path = os.path.join(dir_prompt, selected_yaml_file) load_prompt_yaml(yaml_file_path) elif not selected_yaml_file: # Directly assigning default values since no file is selected st.session_state['prompt_info'] = {} st.session_state['prompt_author'] = st.session_state['default_prompt_author'] st.session_state['prompt_author_institution'] = st.session_state['default_prompt_author_institution'] st.session_state['prompt_description'] = st.session_state['default_prompt_description'] st.session_state['instructions'] = st.session_state['default_instructions'] st.session_state['json_formatting_instructions'] = st.session_state['default_json_formatting_instructions'] st.session_state['rules'] = {} st.session_state['LLM'] = 'gpt' st.session_state['assigned_columns'] = [] st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], 'LLM': st.session_state['LLM'] } def build_LLM_prompt_config(): st.session_state['assigned_columns'] = [] st.session_state['default_prompt_author'] = 'unknown' st.session_state['default_prompt_author_institution'] = 'unknown' st.session_state['default_prompt_description'] = 'unknown' st.session_state['default_instructions'] = """1. Refactor the unstructured OCR text into a dictionary based on the JSON structure outlined below. 2. You should map the unstructured OCR text to the appropriate JSON key and then populate the field based on its rules. 3. Some JSON key fields are permitted to remain empty if the corresponding information is not found in the unstructured OCR text. 4. Ignore any information in the OCR text that doesn't fit into the defined JSON structure. 5. Duplicate dictionary fields are not allowed. 6. Ensure that all JSON keys are in lowercase. 7. Ensure that new JSON field values follow sentence case capitalization. 8. 
Ensure all key-value pairs in the JSON dictionary strictly adhere to the format and data types specified in the template. 9. Ensure the output JSON string is valid JSON format. It should not have trailing commas or unquoted keys. 10. Only return a JSON dictionary represented as a string. You should not explain your answer.""" st.session_state['default_json_formatting_instructions'] = """The next section of instructions outlines how to format the JSON dictionary. The keys are the same as those of the final formatted JSON object. For each key there is a format requirement that specifies how to transcribe the information for that key. The possible formatting options are: 1. "verbatim transcription" - field is populated with verbatim text from the unformatted OCR. 2. "spell check transcription" - field is populated with spelling corrected text from the unformatted OCR. 3. "boolean yes no" - field is populated with only yes or no. 4. "boolean 1 0" - field is populated with only 1 or 0. 5. "integer" - field is populated with only an integer. 6. "[list]" - field is populated from one of the values in the list. 7. "yyyy-mm-dd" - field is populated with a date in the format year-month-day. The desired null value is also given. Populate the field with the null value of the information for that key is not present in the unformatted OCR text.""" # Start building the Streamlit app col_prompt_main_left, ___, col_prompt_main_right = st.columns([6,1,3]) with col_prompt_main_left: st.title("Custom LLM Prompt Builder") st.subheader('About') st.write("This form allows you to craft a prompt for your specific task.") st.subheader('How it works') st.write("1. Edit this page until you are happy with your instructions. We recommend looking at the basic structure, writing down your prompt inforamtion in a Word document so that it does not randomly disappear, and then copying and pasting that info into this form once your whole prompt structure is defined.") st.write("2. After you enter all of your prompt instructions, click 'Save' and give your file a name.") st.write("3. This file will be saved as a yaml configuration file in the `..VoucherVision/custom_prompts` folder.") st.write("4. When you go back the main VoucherVision page you will now see your custom prompt available in the 'Prompt Version' dropdown menu.") st.write("5. Select your custom prompt. Note, your prompt will only be available for the LLM that you set when filling out the form below.") dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') yaml_files = [f for f in os.listdir(dir_prompt) if f.endswith('.yaml')] col_load_text, col_load_btn = st.columns([8,2]) with col_load_text: # Dropdown for selecting a YAML file selected_yaml_file = st.selectbox('Select a prompt YAML file to load:', [''] + yaml_files) with col_load_btn: st.write('##') # Button to load the selected prompt st.button('Load Prompt', on_click=btn_load_prompt, args=[selected_yaml_file, dir_prompt]) # Prompt Author Information st.header("Prompt Author Information") st.write("We value community contributions! Please provide your name(s) (or pseudonym if you prefer) for credit. If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author'] = st.text_input("Enter names of prompt author(s)", value=st.session_state['default_prompt_author']) st.write("Please provide your institution name. 
If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author_institution'] = st.text_input("Enter name of institution", value=st.session_state['default_prompt_author_institution']) st.write("Please provide a description of your prompt and its intended task. Is it designed for a specific collection? Taxa? Database structure?") st.session_state['prompt_description'] = st.text_input("Enter description of prompt", value=st.session_state['default_prompt_description']) st.write('---') st.header("Set LLM Model Type") # Define the options for the dropdown llm_options = ['gpt', 'palm'] # Create the dropdown and set the value to session_state['LLM'] st.write("Which LLM is this prompt designed for? This will not restrict its use to a specific LLM, but some prompts will behave in different ways across models.") st.write("For example, VoucherVision will automatically add multiple JSON formatting blocks to all PaLM 2 prompts to coax PaLM 2 to return a valid JSON object.") st.session_state['LLM'] = st.selectbox('Set LLM', llm_options, index=llm_options.index(st.session_state.get('LLM', 'gpt'))) st.write('---') # Instructions Section st.header("Instructions") st.write("These are the general instructions that guide the LLM through the transcription task. We recommend using the default instructions unless you have a specific reason to change them.") st.session_state['instructions'] = st.text_area("Enter instructions", value=st.session_state['default_instructions'].strip(), height=350, disabled=True) st.write('---') # Column Instructions Section st.header("JSON Formatting Instructions") st.write("The following section tells the LLM how we want to structure the JSON dictionary. We do not recommend changing this section because it would likely result in unstable and inconsistent behavior.") st.session_state['json_formatting_instructions'] = st.text_area("Enter column instructions", value=st.session_state['default_json_formatting_instructions'], height=350, disabled=True) st.write('---') col_left, col_right = st.columns([6,4]) with col_left: st.subheader('Add/Edit Columns') # Initialize rules in session state if not already present if 'rules' not in st.session_state or not st.session_state['rules']: st.session_state['rules']['Dictionary'] = { "catalog_number": { "format": "verbatim transcription", "null_value": "", "description": "The barcode identifier, typically a number with at least 6 digits, but fewer than 30 digits." } } st.session_state['rules']['SpeciesName'] = { "taxonomy": ["Genus_species"] } # Layout for adding a new column name # col_text, col_textbtn = st.columns([8, 2]) # with col_text: new_column_name = st.text_input("Enter a new column name:") # with col_textbtn: # st.write('##') if st.button("Add New Column") and new_column_name: if new_column_name not in st.session_state['rules']['Dictionary']: st.session_state['rules']['Dictionary'][new_column_name] = {"format": "", "null_value": "", "description": ""} st.success(f"New column '{new_column_name}' added. Now you can edit its properties.") else: st.error("Column name already exists. 
Please enter a unique column name.") # Get columns excluding the protected "catalog_number" st.write('#') editable_columns = [col for col in st.session_state['rules']['Dictionary'] if col != "catalog_number"] column_name = st.selectbox("Select a column to edit:", [""] + editable_columns) # Handle rules editing current_rule = st.session_state['rules']['Dictionary'].get(column_name, { "format": "", "null_value": "", "description": "" }) if 'selected_column' not in st.session_state: st.session_state['selected_column'] = column_name # Form for input fields with st.form(key='rule_form'): format_options = ["verbatim transcription", "spell check transcription", "boolean yes no", "boolean 1 0", "integer", "[list]", "yyyy-mm-dd"] current_rule["format"] = st.selectbox("Format:", format_options, index=format_options.index(current_rule["format"]) if current_rule["format"] else 0) current_rule["null_value"] = st.text_input("Null value:", value=current_rule["null_value"]) current_rule["description"] = st.text_area("Description:", value=current_rule["description"]) commit_button = st.form_submit_button("Commit Column") default_rule = { "format": format_options[0], # default format "null_value": "", # default null value "description": "", # default description } if st.session_state['selected_column'] != column_name: # Column has changed. Update the session_state selected column. st.session_state['selected_column'] = column_name # Reset the current rule to the default for this new column, or a blank rule if not set. current_rule = st.session_state['rules']['Dictionary'].get(column_name, default_rule.copy()) # Handle commit action if commit_button and column_name: # Commit the rules to the session state. st.session_state['rules']['Dictionary'][column_name] = current_rule.copy() st.success(f"Column '{column_name}' added/updated in rules.") # Force the form to reset by clearing the fields from the session state st.session_state.pop('selected_column', None) # Clear the selected column to force reset # st.session_state['rules'][column_name] = current_rule # st.success(f"Column '{column_name}' added/updated in rules.") # # Reset current_rule to default values for the next input # current_rule["format"] = default_rule["format"] # current_rule["null_value"] = default_rule["null_value"] # current_rule["description"] = default_rule["description"] # # To ensure that the form fields are reset, we can clear them from the session state # for key in current_rule.keys(): # st.session_state[key] = default_rule[key] # Layout for removing an existing column # del_col, del_colbtn = st.columns([8, 2]) # with del_col: delete_column_name = st.selectbox("Select a column to delete:", [""] + editable_columns, key='delete_column') # with del_colbtn: # st.write('##') if st.button("Delete Column") and delete_column_name: del st.session_state['rules'][delete_column_name] st.success(f"Column '{delete_column_name}' removed from rules.") with col_right: # Display the current state of the JSON rules st.subheader('Formatted Columns') st.json(st.session_state['rules']['Dictionary']) # st.subheader('All Prompt Info') # st.json(st.session_state['prompt_info']) st.write('---') col_left_mapping, col_right_mapping = st.columns([6,4]) with col_left_mapping: st.header("Mapping") st.write("Assign each column name to a single category.") st.session_state['refresh_mapping'] = False # Dynamically create a list of all column names that can be assigned # This assumes that the column names are the keys in the dictionary under 'rules' all_column_names = 
list(st.session_state['rules']['Dictionary'].keys()) categories = ['TAXONOMY', 'GEOGRAPHY', 'LOCALITY', 'COLLECTING', 'MISCELLANEOUS'] if ('mapping' not in st.session_state) or (st.session_state['mapping'] == {}): st.session_state['mapping'] = {category: [] for category in categories} for category in categories: # Filter out the already assigned columns available_columns = [col for col in all_column_names if col not in st.session_state['assigned_columns'] or col in st.session_state['mapping'].get(category, [])] # Ensure the current mapping is a subset of the available options current_mapping = [col for col in st.session_state['mapping'].get(category, []) if col in available_columns] # Provide a safe default if the current mapping is empty or contains invalid options safe_default = current_mapping if all(col in available_columns for col in current_mapping) else [] # Create a multi-select widget for the category with a safe default selected_columns = st.multiselect( f"Select columns for {category}:", available_columns, default=safe_default, key=f"mapping_{category}" ) # Update the assigned_columns based on the selections for col in current_mapping: if col not in selected_columns and col in st.session_state['assigned_columns']: st.session_state['assigned_columns'].remove(col) st.session_state['refresh_mapping'] = True for col in selected_columns: if col not in st.session_state['assigned_columns']: st.session_state['assigned_columns'].append(col) st.session_state['refresh_mapping'] = True # Update the mapping in session state when there's a change st.session_state['mapping'][category] = selected_columns if st.session_state['refresh_mapping']: st.session_state['refresh_mapping'] = False # Button to confirm and save the mapping configuration if st.button('Confirm Mapping'): if check_unique_mapping_assignments(): # Proceed with further actions since the mapping is confirmed and unique pass with col_right_mapping: # Display the current state of the JSON rules st.subheader('Formatted Column Maps') st.json(st.session_state['mapping']) col_left_save, col_right_save = st.columns([6,4]) with col_left_save: # Input for new file name new_filename = st.text_input("Enter filename to save your prompt as a configuration YAML:",placeholder='my_prompt_name') # Button to save the new YAML file if st.button('Save YAML', type='primary'): if new_filename: if check_unique_mapping_assignments(): if check_prompt_yaml_filename(new_filename): save_prompt_yaml(new_filename) else: st.error("File name can only contain letters, numbers, underscores, and dashes. Cannot contain spaces.") else: st.error("Mapping contains an error. 
Make sure that each column is assigned to only ***one*** category.") else: st.error("Please enter a filename.") if st.button('Exit'): st.session_state.proceed_to_build_llm_prompt = False st.session_state.proceed_to_main = True st.rerun() with col_prompt_main_right: st.subheader('All Prompt Components') st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } st.json(st.session_state['prompt_info']) def show_header_welcome(): st.session_state.logo_path = os.path.join(st.session_state.dir_home, 'img','logo.png') st.session_state.logo = Image.open(st.session_state.logo_path) st.image(st.session_state.logo, width=250) def determine_n_images(): try: # Check if 'dir_uploaded_images' key exists and it is not empty if 'dir_uploaded_images' in st and st['dir_uploaded_images']: dir_path = st['dir_uploaded_images'] # This would be the path to the directory return len([f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]) else: return None except: return None def content_header(): col_run_1, col_run_2, col_run_3 = st.columns([4,4,2]) col_test = st.container() st.write("") st.write("") st.write("") st.write("") st.subheader("Overall Progress") col_run_info_1 = st.columns([1])[0] st.write("") st.write("") st.write("") st.write("") st.header("Configuration Settings") with col_run_info_1: # Progress # Progress # st.subheader('Project') # bar = st.progress(0) # new_text = st.empty() # Placeholder for current step name # progress_report = ProgressReportVV(bar, new_text, n_images=10) # Progress overall_progress_bar = st.progress(0) text_overall = st.empty() # Placeholder for current step name st.subheader('Transcription Progress') batch_progress_bar = st.progress(0) text_batch = st.empty() # Placeholder for current step name progress_report = ProgressReport(overall_progress_bar, batch_progress_bar, text_overall, text_batch) st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.write("If you use VoucherVision frequently, you can change the default values that are auto-populated in the form below. In a text editor or IDE, edit the first few rows in the file `../VoucherVision/vouchervision/VoucherVision_Config_Builder.py`") with col_run_1: show_header_welcome() st.subheader('Run VoucherVision') N_STEPS = 6 if determine_n_images(): st.session_state['processing_add_on'] = f" {determine_n_images()} Images" else: st.session_state['processing_add_on'] = '' if check_if_usable(): if st.button(f"Start Processing{st.session_state['processing_add_on']}", type='primary'): # Define number of overall steps progress_report.set_n_overall(N_STEPS) progress_report.update_overall(f"Starting VoucherVision...") # First, write the config file. 
write_config_file(st.session_state.config, st.session_state.dir_home, filename="VoucherVision.yaml") path_custom_prompts = os.path.join(st.session_state.dir_home,'custom_prompts',st.session_state.config['leafmachine']['project']['prompt_version']) # Call the machine function. last_JSON_response, total_cost = voucher_vision(None, st.session_state.dir_home, path_custom_prompts, None, progress_report,path_api_cost=os.path.join(st.session_state.dir_home,'api_cost','api_cost.yaml'), is_real_run=True) if total_cost: st.success(f":money_with_wings: This run cost :heavy_dollar_sign:{total_cost:.4f}") # Format the JSON string for display. if last_JSON_response is None: st.markdown(f"Last JSON object in the batch: NONE") else: try: formatted_json = json.dumps(json.loads(last_JSON_response), indent=4, sort_keys=False) except: formatted_json = json.dumps(last_JSON_response, indent=4, sort_keys=False) st.markdown(f"Last JSON object in the batch:\n```\n{formatted_json}\n```") st.balloons() else: st.button("Start Processing", type='primary', disabled=True) st.error(":heavy_exclamation_mark: Required API keys not set. Please visit the 'API Keys' tab and set the Google Vision OCR API key and at least one LLM key.") st.button("Refresh", on_click=refresh) with col_run_2: if st.button("Test GPT"): progress_report.set_n_overall(TestOptionsGPT.get_length()) test_results, JSON_results = run_demo_tests_GPT(progress_report) with col_test: display_test_results(test_results, JSON_results, 'gpt') st.balloons() if st.button("Test PaLM2"): progress_report.set_n_overall(TestOptionsPalm.get_length()) test_results, JSON_results = run_demo_tests_Palm(progress_report) with col_test: display_test_results(test_results, JSON_results, 'palm') st.balloons() with col_run_3: st.subheader('Check GPU') if st.button("GPU"): success, info = test_GPU() if success: st.balloons() for message in info: st.success(message) else: for message in info: st.error(message) def content_tab_settings(): st.header('Project') col_project_1, col_project_2 = st.columns([4,2]) st.write("---") st.header('Input Images') col_local_1, col_local_2 = st.columns([4,2]) st.write("---") st.header('LeafMachine2 Label Collage') col_cropped_1, col_cropped_2 = st.columns([4,4]) st.write("---") st.header('OCR Overlay Image') col_ocr_1, col_ocr_2 = st.columns([4,4]) os.path.join(st.session_state.dir_home, ) ### Project with col_project_1: st.session_state.config['leafmachine']['project']['run_name'] = st.text_input("Run name", st.session_state.config['leafmachine']['project'].get('run_name', '')) st.session_state.config['leafmachine']['project']['dir_output'] = st.text_input("Output directory", st.session_state.config['leafmachine']['project'].get('dir_output', '')) ### Input Images Local with col_local_1: st.session_state.config['leafmachine']['project']['dir_images_local'] = st.text_input("Input images directory", st.session_state.config['leafmachine']['project'].get('dir_images_local', '')) st.session_state.config['leafmachine']['project']['continue_run_from_partial_xlsx'] = st.text_input("Continue run from partially completed project XLSX", st.session_state.config['leafmachine']['project'].get('continue_run_from_partial_xlsx', ''), disabled=True) st.write("---") st.subheader('LLM Version') st.markdown( """ ***Note:*** GPT-4 is 20x more expensive than GPT-3.5 """ ) st.session_state.config['leafmachine']['LLM_version'] = st.selectbox("LLM version", ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"], index=["gpt-4-1106-preview", "GPT 
4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"].index(st.session_state.config['leafmachine'].get('LLM_version', 'Azure GPT 4'))) st.write("---") st.subheader('Prompt Version') versions, default_version = get_prompt_versions(st.session_state.config['leafmachine']['LLM_version']) if versions: selected_version = st.session_state.config['leafmachine']['project'].get('prompt_version', default_version) if selected_version not in versions: selected_version = default_version st.session_state.config['leafmachine']['project']['prompt_version'] = st.selectbox("Prompt Version", versions, index=versions.index(selected_version)) with col_cropped_1: default_crops = st.session_state.config['leafmachine']['cropped_components'].get('save_cropped_annotations', ['leaf_whole']) st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. (Requires GPU)") st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False)) st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations'] = st.multiselect("Components to crop", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights', 'leaf_whole', 'leaf_partial', 'leaflet', 'seed_fruit_one', 'seed_fruit_many', 'flower_one', 'flower_many', 'bud','specimen','roots','wood'],default=default_crops) with col_cropped_2: ba = os.path.join(st.session_state.dir_home,'demo', 'ba','ba2.png') image = Image.open(ba) st.image(image, caption='LeafMachine2 Collage', output_format = "PNG") with col_ocr_1: st.write('This will plot bounding boxes around all text that Google Vision was able to detect. If there are no boxes around text, then the OCR failed, so that missing text will not be seen by the LLM when it is creating the JSON object. 
The created image will be viewable in the VoucherVisionEditor.') st.session_state.config['leafmachine']['do_create_OCR_helper_image'] = st.checkbox("Create image showing an overlay of the OCR detections", st.session_state.config['leafmachine'].get('do_create_OCR_helper_image', False)) with col_ocr_2: ocr = os.path.join(st.session_state.dir_home,'demo', 'ba','ocr.png') image_ocr = Image.open(ocr) st.image(image_ocr, caption='OCR Overlay Images', output_format = "PNG") def content_tab_component(): st.header('Archival Components') ACD_version = st.selectbox("Archival Component Detector (ACD) Version", ["Version 2.1", "Version 2.2"]) ACD_confidence_default = int(st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] * 100) ACD_confidence = st.number_input("ACD Confidence Threshold (%)", min_value=0, max_value=100,value=ACD_confidence_default) st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] = float(ACD_confidence/100) st.session_state.config['leafmachine']['archival_component_detector']['do_save_prediction_overlay_images'] = st.checkbox("Save Archival Prediction Overlay Images", st.session_state.config['leafmachine']['archival_component_detector'].get('do_save_prediction_overlay_images', True)) st.session_state.config['leafmachine']['archival_component_detector']['ignore_objects_for_overlay'] = st.multiselect("Hide Archival Components in Prediction Overlay Images", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',], default=[]) # Depending on the selected version, set the configuration if ACD_version == "Version 2.1": st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' elif ACD_version == "Version 2.2": #TODO update this to version 2.2 st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' def content_tab_processing(): st.header('Processing Options') col_processing_1, col_processing_2 = st.columns([2,2,]) with col_processing_1: st.subheader('Compute Options') st.session_state.config['leafmachine']['project']['num_workers'] = st.number_input("Number of CPU workers", value=st.session_state.config['leafmachine']['project'].get('num_workers', 1), disabled=True) st.session_state.config['leafmachine']['project']['batch_size'] = st.number_input("Batch size", value=st.session_state.config['leafmachine']['project'].get('batch_size', 500), help='Sets the batch size for the LeafMachine2 cropping. 
If computer RAM is filled, lower this value to ~100.') with col_processing_2: st.subheader('Misc') st.session_state.config['leafmachine']['project']['prefix_removal'] = st.text_input("Remove prefix from catalog number", st.session_state.config['leafmachine']['project'].get('prefix_removal', '')) st.session_state.config['leafmachine']['project']['suffix_removal'] = st.text_input("Remove suffix from catalog number", st.session_state.config['leafmachine']['project'].get('suffix_removal', '')) st.session_state.config['leafmachine']['project']['catalog_numerical_only'] = st.checkbox("Require 'Catalog Number' to be numerical only", st.session_state.config['leafmachine']['project'].get('catalog_numerical_only', True)) ### Logging and Image Validation - col_v1 st.header('Logging and Image Validation') col_v1, col_v2 = st.columns(2) with col_v1: st.session_state.config['leafmachine']['do']['check_for_illegal_filenames'] = st.checkbox("Check for illegal filenames", st.session_state.config['leafmachine']['do'].get('check_for_illegal_filenames', True)) st.session_state.config['leafmachine']['do']['check_for_corrupt_images_make_vertical'] = st.checkbox("Check for corrupt images", st.session_state.config['leafmachine']['do'].get('check_for_corrupt_images_make_vertical', True)) st.session_state.config['leafmachine']['print']['verbose'] = st.checkbox("Print verbose", st.session_state.config['leafmachine']['print'].get('verbose', True)) st.session_state.config['leafmachine']['print']['optional_warnings'] = st.checkbox("Show optional warnings", st.session_state.config['leafmachine']['print'].get('optional_warnings', True)) with col_v2: log_level = st.session_state.config['leafmachine']['logging'].get('log_level', None) log_level_display = log_level if log_level is not None else 'default' selected_log_level = st.selectbox("Logging Level", ['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], index=['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'].index(log_level_display)) if selected_log_level == 'default': st.session_state.config['leafmachine']['logging']['log_level'] = None else: st.session_state.config['leafmachine']['logging']['log_level'] = selected_log_level def content_tab_domain(): st.header('Embeddings Database') col_emb_1, col_emb_2 = st.columns([4,2]) with col_emb_1: st.markdown( """ VoucherVision includes the option of using domain knowledge inside of the dynamically generated prompts. The OCR text is queried against a database of existing label transcriptions. The most similar existing transcriptions act as an example of what the LLM should emulate and are shown to the LLM as JSON objects. VoucherVision uses cosine similarity search to return the most similar existing transcription. - Note: Using domain knowledge may increase the chance that foreign text is included in the final transcription - Disabling this feature will show the LLM multiple examples of an empty JSON skeleton structure instead - Enabling this option requires a GPU with at least 8GB of VRAM - The domain knowledge files can be located in the directory "../VoucherVision/domain_knowledge". On first run the embeddings database must be created, which takes time. If the database creation runs each time you use VoucherVision, then something is wrong. 
""" ) st.write(f"Domain Knowledge is only available for the following prompts:") for available_prompts in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.markdown(f"- {available_prompts}") if st.session_state.config['leafmachine']['project']['prompt_version'] in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", True, disabled=True) else: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", False, disabled=True) st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = 
list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')):
validate_dir(os.path.join(st.session_state.dir_home,'expense_report'))
14
2023-10-30 23:25:20+00:00
16k
medsagou/massar-direction-sagoubot
main.py
[ { "identifier": "C_File", "path": "utilities/Class_Files.py", "snippet": "class C_File():\n #____________________________________________________________________________________________________________________________________________________________\n # Le constructeur d'une instance d'un fichier\n # Ce constructeur permet d'attribuer à une instance de fichier son nom (vide par défaut) \n # Ce constructeur permet de spécifier le séparateur des éléments s'il existe (également vide par défauté)su\n # Un séparateur peut être un \";\", une \",\" un \"#', etc. \n def __init__(self,file_name=\"\",sep=\";\", sep2=\"+\"):\n self.nomFichier=file_name\n self.separateur=sep\n self.separateur2=sep2\n \n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def existe_fichier(self):\n if os.path.isfile(self.nomFichier):\n return True\n else:\n return False\n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def specifier_Nom_fichier(self):\n while True:\n print(\"\\n\")\n print(\"Instanciation et saisie d'un nouveau fichier de travail :\\n\")\n self.nomFichier=input(\"Entrez le chemin de votre fichier : \"+\"\\n\")\n if self.existe_fichier():\n print(\"le fichier spécifié existe déjà dans le répertoire courant, veuillez recommencer\")\n else:\n break \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide sans supprimer le fichier de même nom s'il existe\n def create_file(self):\n f = open(self.nomFichier,\"x\") #Création d'un fichier vide. Ici, le fichier n'est pas écrasé contrairement au mode 'w' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec suppression du fichier de même nom s'il existe\n def create_file_2(self):\n f = open(self.nomFichier,\"w\") #Création d'un fichier vide. Ici, le fichier existant qui porte le même nom est écrasé contrairement mode 'x' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec possibilité de dialogue avant de supprimer un fichier de même nom s'il existe dans le même répertoire (dossier)\n def creer_fichier_3(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n print(\"Il existe un fichier qui porte le même nom\"+\"\\n\")\n print(\"Voulez-vous l'écraser ?\")\n while True: # Itération (boucle infinie) pour prévenir les événetuelles erreurs de frappe (autre chose que '1' et '2') (Attention, il faut absolument provoquer quelque part dans la boucle une rupture avec \"break\" )\n # Menu local pour exposer les dexu cas de figures (on peut également créer une instance de la classe Menu ici)\n print(\"Veuillez choisir ce qu'il faut faire, selon les options suivantes : \"+\"\\n\")\n print(\"1. Ecraser le fichier existant\")\n print(\"2. 
Garder le fichier\")\n rep=input(\"Veuillez taper 1 ou 2 \")\n if rep=='1': # Cas où l'utilisateur choisit d'écraser le fichier existant \n self.creer_fichier_2() # Appel à laméthode creer_fichier_2()\n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n elif rep=='2': # Cas où l'utilisateur choisit de ne pas écraser le fichier existant (pas besoin dans ce cas de faire appel à la méthode creer_fichier_1()) \n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n else: # cas où l'utilisateur n'a tapé ni \"1\", ni\"2\"\n print(\"Erreur de frappe\"+\"\\n\")\n else: # cas où le fichier à créer n'existe pas dans le répertoire courant\n self.creer_fichier_1() # Appel à laméthode creer_fichier_1()\n \n #____________________________________________________________________________________________________________________________________________________________\n def ActiverFichier(self,Message):\n print(Message)\n self.specifier_Nom_fichier()\n self.creer_fichier_3() \n \n #____________________________________________________________________________________________________________________________________________________________\n # Supprimer un fichier\n def supprimer_fichier(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n os.remove(self.nomFichier)\n print(\"Le fichier a été supprimé\")\n else:\n print(\"Le fichier spécifié n'existe pas dans le répertoire courant\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un élément\n def enregistrer_Element(self,Element):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode lecture.\n F.write(Element)\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un ensemble d'éléments sous forme de liste\n def Liste_to_Fichier(self,Liste): # 'creer_Fichier_Avec_Liste_Elements(self,ListeElements)' Créer d'un fichier à partir d'une liste : chaque élément de la liste représente une ligne du fichier\n with open(self.nomFichier,'w') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.writelines(Liste)\n def Liste_stript(self, L):\n for i in range(len(L)):\n L[i] = L[i].strip()\n return L\n\n def str_to_fichier(self,string):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.write(string)\n F.write(\"\\n\")\n return\n def str_to_fichier2(self,string):\n with open(self.nomFichier,'w') as F:\n F.write(string)\n F.write(\"\\n\")\n return\n\n def dict_to_file(self, D):\n if type(D) == dict and D != {}:\n with open(self.nomFichier, 'w') as F:\n for c, v in D.items():\n F.write(str(c) + \";\" + str(v))\n F.write(\"\\n\")\n return True\n else:\n print_error(\"WE HAD A PROBLEM WHILE SAVING YOUR DICT\", console=self.console)\n \n def Liste_to_str_to_Fichier(self,Liste_1): \n Liste = self.Liste_to_Str1(Liste_1)\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n \n F.writelines(Liste) \n F.writelines('\\n')\n 
#____________________________________________________________________________________________________________________________________________________________\n # Lire le contenu d'un fichier et le retourne en le plaçant dans une liste\n def fichier_to_Liste(self): # extration d'une liste depuis un fichier : chaque ligne du fichier représente un élément de cette liste\n with open(self.nomFichier, 'r') as f: # Ouverture du fichier en mode lecture.\n return f.readlines()\n def Fichier_to_str(self):\n with open (self.nomFichier,'r') as f:\n return f.read()\n\n def supprimer_element(self,element):\n ch = self.Fichier_to_str()\n print(ch)\n chh = ch.replace(element,'')\n print(chh)\n self.str_to_fichier(ch)\n \n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne\n def afficher_lignes_fichier(self):\n print(\"\\n Affichage des lignes du fichier \\n\")\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n print (ligne) \n print(\"\\n Fin affichage des lignes du fichier\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne et pour chaque ligne mot par mot\n def afficher_mots_fichier(self):\n i=0 # uttiliser comme un simple compteur pour afficher dans un message afin de le rendre plus explicite\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n i+=1\n print(\"Affichage des éléments du contenu la ligne : \",i,\"\\n\") # message explicite\n L=C_Liste(ligne.split(self.separateur)) # Création d'une instance de la classe 'C_Liste'\n L.afficher_Liste() # ici on fait appel à la méthode 'afficher_Liste()' de la classe 'C_Liste'\n\n\n def existe_element_fichier(self,Element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element in Liste_Lignes_du_Fichier[i]:\n return(True)\n return(False)\n \n \n def existe_element_fichier2(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True)\n return(False)\n \n \n def existe_element_fichier3(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True, Liste_Lignes_du_Fichier[i])\n return(False,False)\n\n \n \n def modifier_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.Str_to_List(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if Element not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Nouvelle_Liste=C_Liste(Liste_Elements_Ligne_Courante) # Nouvelle_Liste est une instance de la classe C_Liste\n Nouvelle_Liste_Elements=Nouvelle_Liste.changer_element(Element)\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def ajouter_a_la_fin_de_la_ligne(self,ID,Element,sep):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.str_to_liste(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if ID not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Liste_Elements_Ligne_Courante[-1] = Liste_Elements_Ligne_Courante[-1].replace('\\n','') +sep+ str(Element)\n \n Nouvelle_Liste_Elements=Liste_Elements_Ligne_Courante\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str1(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n \n def Liste_to_Str1(self,Liste_Elements):\n return self.separateur.join(map(str, Liste_Elements))\n \n def Liste_to_Str2(self,Liste_Elements):\n return self.separateur2.join(Liste_Elements)\n \n def supprimer_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]+'\\n']\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier2(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne+\"\\n\" not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2) and Element_ligne not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2):\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) #\n \n def modiffier_ligne(self,Element_ligne,nv_ligne):\n Nouvelle_Liste=[] \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() \n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n Nouvelle_Liste = Nouvelle_Liste+[nv_ligne + '\\n']\n self.Liste_to_Fichier(Nouvelle_Liste) #\n return\n \n \n\n def str_to_liste(self, string):\n return string.split(self.separateur)\n \n \n def nbre_ligne(self):\n return len(self.Fichier_to_Liste())\n \n\n def str_to_liste2(self, string):\n return string.split(self.separateur2)" }, { "identifier": "C_Dossier", "path": "utilities/Class_Files.py", "snippet": "class C_Dossier():\n\n \n def __init__(self,sep=\"\"):\n self.separateur=sep\n \n def dossier_courant(self):\n return os.getcwd()\n\n def existe_dossier(self,Chemin):\n if os.path.exists(Chemin) :\n return True\n else:\n return False \n \n def changer_dossier(self,Chemin):\n if C_Dossier.existe_dossier(Chemin):\n return(chdir(Chemin))\n \n \n def creer_dossier(self,Chemin):\n if not C_Dossier.existe_dossier(Chemin):\n return(mkdir(Chemin))" }, { "identifier": "Read_Db", "path": "absence_app/Read_XLSB_File.py", "snippet": "class Read_Db:\n def __init__(self, input_file = r\"data_to_manage/file_data.xls\", template_file = \"data_to_manage/template.xlsx\", output_file = \"data_to_manage/absence.xlsx\", df = \"\", required_classes=[], progress_bar=\"\", console=\"\"):\n self.index = {0: \"CLASS_StudentIndex\",\n 1: \"Niveau\",\n 2: \"class_name\",\n 3: \"student_index\",\n \"Unnamed: 23\": \"CNE\",\n \"Unnamed: 12\": \"nom\",\n \"Unnamed: 16\": \"prenom\"}\n self.input_file = input_file\n self.output_file = output_file\n self.template_file = template_file\n self.df = df\n self.init_cell = [\"A\"]\n self.start_col = 'A'\n self.end_col = 'C'\n # self.workbook_output = self.get_workbook(output_file)\n self.workbook_output = \"\"\n 
self.required_classes = required_classes\n self.progress_bar = progress_bar\n self.console = console\n\n def get_key(self, val):\n for key, value in self.index.items():\n if val == value:\n return key\n return \"key doesn't exist\"\n\n def get_data_from_xlsb(self):\n xlsb_file = pd.ExcelFile(self.input_file)\n df = xlsb_file.parse('Feuil3', header=None) #\n self.df = df\n return df\n def get_df_from_xls(self):\n xls = pd.ExcelFile(self.input_file)\n workbook = self.get_data_from_xls()\n sheet_names = xls.sheet_names\n data = {}\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n df = pd.read_excel(self.input_file, sheet_name=sheet_name)\n class_name = sheet.cell_value(10, 8)\n data[class_name] = df\n self.df = data\n return data\n\n def get_data_from_xls(self): # new data function\n return xlrd.open_workbook(self.input_file)\n def get_classes_name_from_xls(self):\n workbook = self.get_data_from_xls()\n classes = []\n sheet_names = workbook.sheet_names()\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n class_name = sheet.cell_value(10, 8)\n # print(class_name)\n classes.append(class_name)\n return classes\n\n def get_workbook(self, file_name):\n workbook = openpyxl.load_workbook(file_name)\n return workbook\n\n\n def get_workbook_sheet(self, workbook ,sheet):\n return workbook[sheet]\n\n def add_value_to_sheet(self, worksheet, cell, value):\n cell_to_update = worksheet[cell]\n cell_to_update.value = value\n return\n\n\n def create_copy_sheet(self, class_name = \"\", workbook = \"\", source_sheet = \"\"):\n new_sheet = workbook.copy_worksheet(source_sheet)\n new_sheet.title = class_name\n new_sheet.sheet_view.rightToLeft = True\n return\n\n\n def get_column_list_from_df(self, column_key):\n if self.df == \"\":\n self.get_df_from_xls()\n\n L = list(set(self.df.values[:, column_key].tolist()))\n try:\n L.remove(\"0\")\n except ValueError:\n pass\n try:\n L.remove(0)\n except ValueError:\n pass\n return L\n def restart_workbook_output(self):\n self.workbook_output.close()\n self.workbook_output = self.get_workbook(self.output_file)\n return\n def get_sheet_names_workbout_output(self):\n self.workbook_output = self.get_workbook(self.output_file)\n return self.workbook_output.sheetnames\n\n\n\n\n def create_all_class_sheet(self):\n if check_exist_file(self.output_file):\n # class_in_sheet = self.get_sheet_names_workbout_output()\n # with open(self.output_file, 'w') as f:\n # f.close()\n os.remove(self.output_file)\n print_info(\"WE REMOVED THE OUTPUT FILE TO CREATE NEW ONE\", console=self.console)\n # else:\n # class_in_sheet = []\n # classes_list = self.get_column_list_from_df(column_key=self.get_key(\"class_name\"))\n\n workbook = openpyxl.load_workbook(self.template_file)\n source_sheet = workbook[\"BaseSheet\"]\n classes_list = self.get_classes_name_from_xls()\n # print(classes_list)\n for classe in classes_list:\n # if classe in class_in_sheet:\n # print_error(f\"SHEET FOR {classe} ALREADY EXIST\")\n # continue\n # if not in college just skipit\n if classe.split(\"-\")[0][1:] not in self.required_classes:\n continue\n print_info(f\"CREATE A SHEET FOR {classe} CLASS\", console=self.console)\n if classe != \"\":\n self.create_copy_sheet(class_name=classe, workbook=workbook, source_sheet = source_sheet)\n\n workbook.save(str(self.output_file))\n workbook.close()\n return\n\n def fill_all_class_sheets(self):\n self.create_all_class_sheet()\n # already check above\n if str(self.df) == \"\":\n print_info(\"GETTING THE DATA...\", console=self.console)\n 
self.get_data_from_xls()\n # print_info(\"RESTARTING WORKSHEET\")\n # self.restart_workbook_output()\n self.workbook_output = self.get_workbook(self.output_file)\n class_in_sheet = list(self.get_sheet_names_workbout_output())\n # print(class_in_sheet)\n for k in range(len(class_in_sheet)):\n # print(f\"{k+1}/{len(class_in_sheet)}\")\n self.progress_bar.set((k+1)/len(class_in_sheet))\n worksheet = self.get_workbook_sheet(workbook = self.workbook_output, sheet=class_in_sheet[k])\n i = 0\n print_info(f\"WORKING ON {class_in_sheet[k]} CLASS DATA TO SHEET\", console=self.console)\n # column = db.df[\"3ASCG-5\"].columns.tolist()\n #\n # for index, row in db.df[\"3ASCG-5\"].iterrows():\n # if pd.isna(row[column[23]]):\n # continue\n # print(row[column[23]], row[column[16]], row[column[12]])\n index_student = 0\n self.get_df_from_xls()\n if class_in_sheet[k] == 'BaseSheet':\n continue\n for index, row in self.df[class_in_sheet[k]].iterrows():\n if pd.isna(row[self.get_key(\"CNE\")]):\n continue\n if index_student == 0:\n index_student += 1\n continue\n i += 1\n # print(row)\n for col in range(ord(self.start_col), ord(self.end_col) + 1):\n if chr(col) == \"A\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=index_student)\n elif chr(col) == \"B\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=row[self.get_key(\"CNE\")])\n elif chr(col) == \"C\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i),\n value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n self.add_value_to_sheet(worksheet=worksheet, cell=\"BA\" + str(9 + i), value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n if i > 49:\n return\n\n index_student += 1\n\n\n # add number of students\n self.add_value_to_sheet(worksheet=worksheet, cell=\"AO6\", value=str(i))\n # add class name\n self.add_value_to_sheet(worksheet=worksheet, cell=\"D6\", value=class_in_sheet[k])\n self.workbook_output.save(self.output_file)\n # self.workbook_output.close()\n print_success(\"Your lists is generated successfully\", console=self.console)\n print_success(f\"Your file path: {self.output_file}\", console=self.console)\n return" }, { "identifier": "Absence", "path": "absence_app/Absences.py", "snippet": "class Absence:\n def __init__(self, driver=\"\", console=\"\"):\n self.driver = driver\n self.console = console\n self.data_table_Xpath = \"/html/body/div/div[1]/div[2]/div[2]/section[2]/div[2]/div[1]/div/div/div[2]/div/form/div/div/div/div/div/div/div/div[2]/div/table\"\n self.data_table_reduced_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody'\n self.row_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody/tr['\n self.nome_Xpath = ']/td[3]'\n self.CNE_Xpath = ']/td[2]'\n self.select_Xpath = ']/td[4]/select'\n self.h_Xpath = ']/td['\n self.dates = \"\"\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, \"#search > div > div > div > div.box-body > div.blocBtn > button\")\n self.saveBtnCssSelector = \"#gridFrom > button\"\n\n def get_list_page(self):\n try:\n self.driver.get(\"https://massar.men.gov.ma/Evaluation/Absence/AbsenceJournaliereParClasse\")\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"We Can't find the list page! 
Close the program and try again.\", console=self.console)\n else:\n print_info(\"GETTING TO THE LIST PAGE\", console=self.console)\n\n def get_classes_from_classes_page(self):\n return\n\n def main_absence_loop(self):\n TypeEnseignement = self.driver.find_element(By.ID, \"TypeEnseignement\")\n TypeEnseignement_all_options = TypeEnseignement.find_elements(By.TAG_NAME, \"option\")\n TypeEnseignement_Select = Select(TypeEnseignement)\n\n for TypeEnseignement_option in TypeEnseignement_all_options:\n try:\n WebDriverWait(self.driver, 5).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"CHECK YOUR INTERNET CONNECTION THEN TRY AGAIN\", console=self.console)\n TypeEnseignement_Select.select_by_value(TypeEnseignement_option.get_attribute(\"value\"))\n\n Cycle = self.driver.find_element(By.ID, \"Cycle\")\n Cycle_all_options = Cycle.find_elements(By.TAG_NAME, \"option\")\n\n Cycle_Select = Select(Cycle)\n\n for Cycle_option in Cycle_all_options:\n if Cycle_option.text != \"\":\n Cycle_Select.select_by_value(Cycle_option.get_attribute(\"value\"))\n Niveau = self.driver.find_element(By.ID, \"Niveau\")\n Niveau_all_options = Niveau.find_elements(By.TAG_NAME, \"option\")\n Niveau_Select = Select(Niveau)\n\n for Niveau_option in Niveau_all_options:\n if Niveau_option.text != \"\":\n Niveau_Select.select_by_value(Niveau_option.get_attribute(\"value\"))\n\n Classe = self.driver.find_element(By.ID, \"Classe\")\n Classe_all_options = Classe.find_elements(By.TAG_NAME, \"option\")\n Classe_Select = Select(Classe)\n\n for Classe_option in Classe_all_options:\n\n if Classe_option.text != \"\":\n classe_absence = Scan_Absences(classe=Classe_option.text)\n classe_list_absence, start_date, end_date = classe_absence.get_absence_day_per_student2()\n\n if classe_list_absence == False:\n print_info(f\"THE CLASS {Classe_option.text} NOT IN THE EXCEL FILE\", console=self.console)\n continue\n self.dates = get_date_list(start_date_str=start_date, end_date_str=end_date)\n Classe_Select.select_by_value(Classe_option.get_attribute(\"value\"))\n for l in range(len(self.dates)):\n print_success(f\"WORKING ON CLASS {Classe_option.text}, DATE {self.dates[l]}...\", console=self.console)\n date = self.driver.find_element(By.ID, \"Jour\")\n date.send_keys(Keys.CONTROL + \"a\")\n date.send_keys(Keys.DELETE)\n date.send_keys(self.dates[l])\n try:\n WebDriverWait(self.driver, 15).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button'))\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n else:\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button')\n self.searchBtn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n continue\n else:\n print_info(\"FILLING THE ABSENCE...\", console=self.console)\n self.fill_absence(classe_list_absence=classe_list_absence,class_name=Classe_option.text, day_index = l)\n try:\n WebDriverWait(self.driver, 30).until(\n EC.presence_of_element_located((By.CSS_SELECTOR,\"#gridFrom > button\"))\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON ', console=self.console)\n self.driver.quit()\n # sys.exit()\n else:\n try:\n 
WebDriverWait(self.driver, 15).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"#gridFrom > button\")))\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON', console=self.console)\n else:\n saveBtn = self.driver.find_element(By.CSS_SELECTOR, \"#gridFrom > button\")\n # saveBtn.click()\n self.driver.execute_script(\"arguments[0].click();\", saveBtn)\n\n print_info('SAVE BUTTON IS CLICKED', console=self.console)\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.ID, \"Model_msg_Btn\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE CLOSE BUTTON', console=self.console)\n else:\n print_info('CLOSE BUTTON IS CLOSED', console=self.console)\n close_btn = self.driver.find_element(By.ID, \"Model_msg_Btn\")\n close_btn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n\n print_success(f\"CLASS {Classe_option.text} PASSED, DATE {self.dates[l]}\", console=self.console)\n\n return\n\n def fill_absence(self, classe_list_absence, class_name, day_index):\n mytable = self.driver.find_element(By.XPATH, self.data_table_reduced_Xpath)\n i = 0\n for row in mytable.find_elements(By.CSS_SELECTOR, 'tr'):\n i += 1\n cne = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.CNE_Xpath))\n name = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.nome_Xpath))\n\n try:\n week_absence_student = classe_list_absence[cne.text]\n week_days_per_student = self.list_week_to_days(week_absence_student)\n except KeyError as e:\n print_error(e, self.console)\n print_error(f'THIS CNE {cne.text} DOES NOT EXIST, THE NAME IS: {name.text}, CLASS: {class_name}', console=self.console)\n else:\n self.fill_absence_per_day(i,week_days_per_student[day_index])\n\n # if classe_name == \"1APIC-1\":\n # time.sleep(400)\n return\n\n def fill_absence_per_day(self,row_i, day):\n j = 0\n if str(day[0]) == \"0\":\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(5) + \"]/input[1]\")\n checkbox.click()\n return\n elif \"x\" in day:\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)\n )\n )\n )\n except Exception as e:\n print_error(e, self.console)\n print_error(\"AN ERROR IN HTML SELECTION PLEASE TRY AGAIN.\", console=self.console)\n self.exit_program()\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n for i in range(len(day)):\n if day[i] == None:\n continue\n if str(day[i]) == \"x\":\n # print(day[i])\n if i < 4:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(6 + i) + \"]/input[1]\")\n else:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(\n self.h_Xpath) + str(8 + i) + 
\"]/input[1]\")\n checkbox.click()\n else:\n print_error('WE CANNOT REGONIZE THE FILL OF THE CELL', console=self.console)\n\n # j += 1\n # date = self.driver.find_element(By.ID, \"Jour\")\n # date.send_keys(Keys.CONTROL + \"a\")\n # date.send_keys(Keys.DELETE)\n # date.send_keys(self.dates[j])\n # self.searchBtn.click()\n\n\n def list_week_to_days(self, list_week):\n index = 0\n week = []\n day = []\n for i in range(2,len(list_week)):\n if index == 8:\n week.append(day)\n day = []\n index = 0\n day.append(list_week[i])\n index += 1\n week.append(day)\n return week\n\n\n def main_list_reader(self):\n self.get_list_page()\n self.list_of_each_class()\n return" } ]
import tkinter as tk
import customtkinter
import time
import os
import threading
import logging
import sys
from tkinter import filedialog
from PIL import Image
from validate_email import validate_email
from utilities import C_File, C_Dossier
from dotenv import set_key, load_dotenv
from absence_app import Read_Db
from absence_app import Absence
from Interaction_browser import Massar_Direction_Sagou
13,411
self.run_bot = customtkinter.CTkButton(self.tabview_fill_bot.tab("Review & Submit"), text="Run", command=self.run_bot_interaction, width=50) self.run_bot.grid(row=6, column=5, padx=10, pady=(5, 5)) self.return_btn5 = customtkinter.CTkButton(self.tabview_fill_bot.tab("Review & Submit"), text="Back", command=self.back2, width=50, fg_color="gray30") self.return_btn5.grid(row=6, column=4, padx=10, pady=(5, 5)) if self.about_us_text is not None: self.about_us_text.grid_remove() self.about_us_logo.grid_remove() self.console_text.grid() self.try_again_fill = True self.select_frame_by_name("Fill Absence Bot") else: self.tabview_generate_lists.grid_remove() self.tabview_fill_bot.grid() self.console_text.grid() if self.about_us_text is not None: self.about_us_text.grid_remove() self.about_us_logo.grid_remove() self.select_frame_by_name("Fill Absence Bot") def about_us_button_event(self): if self.tabview_generate_lists.grid_info(): self.tabview_generate_lists.grid_remove() if self.tabview_fill_bot is not None: if self.tabview_fill_bot.grid_info(): self.tabview_fill_bot.grid_remove() if self.about_us_text is not None: self.about_us_text.grid() self.about_us_logo.grid() self.console_text.grid_remove() self.select_frame_by_name("About us") else: self.about_us_logo = customtkinter.CTkLabel(self, text="", image=self.about_us_image) self.about_us_logo.grid(row=0, column=1, padx=10, pady=10) self.about_us_text = customtkinter.CTkTextbox(self, height=200, wrap="word", font=("Arial", 18)) self.about_us_text.grid(row=1, column=1,rowspan=3, columnspan=6, padx=(20, 20), pady = (15, 20), sticky = "nsew") self.console_text.grid_remove() self.about_us_text.tag_config("Title", foreground="gray92") self.about_us_text.tag_config("subTitle", foreground="gray65") self.about_us_text.tag_config("Paragraph", foreground="gray50") # Content to be displayed # Insert the formatted text into the Text widget self.about_us_text.insert("end", "\n About Us", "LargeText") self.about_us_text.insert("end", "\n\nMassar Direction Sagoubot is a cutting-edge automation project designed to streamline and simplify the process of managing absence data for multiple classes within a web application. Our solution is meticulously crafted using modern technologies and software tools to optimize efficiency and save valuable time for teachers and administrators.\n", "Paragraph") self.about_us_text.insert("end", "\n\n Terms and Privacy", "Title") self.about_us_text.insert("end", "\n\nAccount Access", "subTitle") self.about_us_text.insert("end", "\nTo enhance your experience with Massar Direction Sagoubot, the application utilizes your account credentials to securely log in to the Massar website. Your privacy and security are of utmost importance to us. We ensure that your login information is encrypted and used solely for the purpose of automating absence data management.\n", "Paragraph") self.about_us_text.insert("end", "\n\nData Handling", "subTitle") self.about_us_text.insert("end", "\nYour data, specifically related to absence records and class information, is processed within the confines of the application to facilitate automation. We do not store or retain any of your personal data beyond the scope of improving application functionality.\n", "Paragraph") self.about_us_text.insert("end", "\n\nSecurity Measures", "subTitle") self.about_us_text.insert("end", "\nWe employ industry-standard security measures to safeguard your account information. 
This includes encryption protocols and best practices to prevent unauthorized access or misuse of your credentials.\n", "Paragraph") self.about_us_text.insert("end", "\n\nUser Consent", "subTitle") self.about_us_text.insert("end", "\nBy using Massar Direction Sagoubot, you consent to the utilization of your Massar account credentials for the sole purpose of automating absence data management. We prioritize transparency and security in handling your login information.\n", "Paragraph") self.about_us_text.insert("end", "\n\nQuestions or Concerns", "subTitle") self.about_us_text.insert("end", "\nIf you have any questions, concerns, or require further clarification regarding our terms, privacy practices, or the usage of your account information, please feel free to reach out to us at [email protected]. Your satisfaction and trust are our top priorities.\n", "Paragraph") self.about_us_text.configure(state="disabled") self.select_frame_by_name("About us") def exit(self): result = tk.messagebox.askokcancel("Confirmation", "Are you sure you want to exit?") if result: # If the user confirms app.quit() # backend functions def generate_absence_file(self): self.generate_progress_bar() self.submit3.configure(state="disabled") self.return_btn3.configure(state="disabled") self.console_text.configure(state="normal") self.label_all_review1.configure(text_color="gray35") def run_fill_all_class_sheets(): reader = Read_Db(input_file=self.entry_path.get(), template_file=self.entry_path2.get(), output_file=str(self.output_path.get()) + "\\absence.xlsx", required_classes=self.selected_classes, progress_bar=self.progressbar_1, console=self.console_text) reader.fill_all_class_sheets() time.sleep(3) self.submit3.configure(state="normal") self.return_btn3.configure(state="normal") self.progressbar_1.grid_remove() self.console_text.configure(state="disabled") self.label_all_review1.configure(text_color="gray70") thread = threading.Thread(target=run_fill_all_class_sheets) thread.start() return def run_bot_interaction(self): self.generate_progress_bar(determinate=False) self.console_text.configure(state="normal") self.run_bot.configure(state="disabled") self.return_btn5.configure(state="disabled") self.label_all_review2.configure(text_color="gray35") def run_fill_absence(): # loading the class here because of the .env file not getting refreshed interaction_object = Massar_Direction_Sagou(console=self.console_text) driver_test = interaction_object.main_interaction() if driver_test: interaction_object.get_list_page()
# https://stackoverflow.com/questions/31836104/pyinstaller-and-onefile-how-to-include-an-image-in-the-exe-file def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS2 except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) logging.basicConfig(filename='app.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') customtkinter.set_appearance_mode("Dark") # Modes: "System" (standard), "Dark", "Light" customtkinter.set_default_color_theme("dark-blue") # Themes: "blue" (standard), "green", "dark-blue" dirPath = os.path.dirname(os.path.realpath(__file__)) class App(customtkinter.CTk): def __init__(self): super().__init__() self.tabview_generate_lists = None self.tabview_fill_bot= None self.generate_list_menu = None self.about_us_text = None self.fill_absence_menu = None self.try_again_generate = False self.try_again_fill = False self.progressbar_1 = None image_path = resource_path("images") self.main_logo_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(200,200)) self.about_us_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(150, 150)) # self.main_logo_photo = ImageTk.PhotoImage(self.main_logo_image) # configure window self.title("SagouBot Massar Direction") self.iconbitmap(resource_path("icon.ico")) self.geometry(f"{1100}x{580}") # configure grid layout (4x4) self.grid_columnconfigure(1, weight=1) self.grid_columnconfigure((2, 3), weight=0) self.grid_rowconfigure((0, 1, 2), weight=1) # create sidebar frame with widgets self.sidebar_frame = customtkinter.CTkFrame(self, width=200, corner_radius=0) self.sidebar_frame.grid(row=0, column=0, rowspan=4, sticky="nsew") self.sidebar_frame.grid_rowconfigure(5, weight=1) self.sidebar_frame.grid(row=0, column=0) self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def 
high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else: paths = C_File(resource_path("db/paths.txt")) L = paths.fichier_to_Liste() L[3] = "ABSENCE_FILE" + "=" + self.entry_path_absence.get() +"\n" L[4] = "EMAIL" + "=" + self.email_entry.get() +"\n" paths.Liste_to_Fichier(L) set_key(dotenv_path=os.path.join(dirPath,".env"), key_to_set="EMAIL", value_to_set=self.email_entry.get()) set_key(dotenv_path=os.path.join(dirPath,".env"), key_to_set="PASSWORD", 
value_to_set=self.password_entry.get()) load_dotenv(dotenv_path=os.path.join(dirPath,".env")) self.tabview_fill_bot.set("Review & Submit") self.label_all_review2 = customtkinter.CTkTextbox(self.tabview_fill_bot.tab("Review & Submit")) self.label_all_review2.grid(row=0, column=0, columnspan=6, sticky="nsew") # self.label_all_review2.insert("1.0", text) text = f"Email:" text += " " * (30 - len("Email:")) text += str(self.email_entry.get()) + "\n\n" self.label_all_review2.insert("end", text) text = "Absence Excel File:" text += " " * (30 - len("Absence Excel File:")) text += str(self.entry_path_absence.get())+ "\n\n" self.label_all_review2.insert("end", text) text = "Browser:" text += " " * (30 - len("Browser:")) if self.browser_type.get() == 2: text += "FireFox" else: text += "Chrome" self.label_all_review2.insert("end", text) self.label_all_review2.configure(state="disabled", text_color="gray70") return def go_to_output_location(self): if self.tabview_generate_lists.grid_info(): tabview = self.tabview_generate_lists tab = tabview.get() optionsHighSchool = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] optionsCollege = [ self.college_inter, self.college_aspeb, self.college_generale ] selected_classes = [] paths = C_File(resource_path("db/paths.txt")) if tab == "Setup": # path validation if self.validate_path(self.entry_path) and self.validate_path(self.entry_path2) and ( self.college_options.get() or self.high_school_options.get()): if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() else: self.selected_classes = selected_classes self.tabview_generate_lists.set("Output Location") L = paths.fichier_to_Liste() L[0] = "DATA" + "=" + self.entry_path.get() + "\n" L[1] = "TEMPLATE" + "=" + self.entry_path2.get() + "\n" paths.Liste_to_Fichier(L) else: if not self.validate_path(self.entry_path): self.label_data_file_error() if not self.validate_path(self.entry_path2): self.label_template_file_error() if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() if tab == "Output Location": if self.validate_dir(self.output_path): self.tabview_generate_lists.set("Review & Submit") L = paths.fichier_to_Liste() L[-1] = "DIR" + "=" + self.output_path.get() paths.Liste_to_Fichier(L) self.label_all_review1 = customtkinter.CTkTextbox(self.tabview_generate_lists.tab("Review & Submit")) self.label_all_review1.grid(row=0, column=0, columnspan=6, sticky="nsew") # self.label_all_review2.insert("1.0", text) text = f"Data file path:" text += " " * (30 - len("Data file path:")) text += str(self.entry_path.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Template file path:" text += " " * (30 - len("Template file path:")) text += str(self.entry_path2.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Classes:" text += " " * (30 - len("Classes:")) for c in self.selected_classes: text = text + c + ",\t" self.label_all_review1.insert("end", text + "\n\n") text = "Output 
directory:" text += " " * (30 - len("Output directory:")) text += str(self.output_path.get()) + "\n\n" self.label_all_review1.insert("end", text) self.label_all_review1.configure(state="disabled", text_color="gray70") else: self.directory_error() return def browse_path(self): filetypes = ( ("Text files", "*.xls"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["DATA"]) if self.path["DATA"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path.delete(0, tk.END) # Clear the entry self.entry_path.insert(0, os.path.abspath(path)) self.path["DATA"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error1() def browse_path2(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["TEMPLATE"]) if self.path["TEMPLATE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path2.delete(0, tk.END) # Clear the entry self.entry_path2.insert(0, os.path.abspath(path)) self.path["TEMPLATE"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error2() def browser_path3(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["ABSENCE_FILE"]) if self.path["ABSENCE_FILE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.path["ABSENCE_FILE"] = path self.entry_path_absence.delete(0, tk.END) # Clear the entry self.entry_path_absence.insert(0, os.path.abspath(path)) file = C_File(file_name=path) if file.existe_fichier(): self.reset_label(self.label_absence_data_file) self.entry_reset(self.entry_path_absence) def browse_folder(self): path = filedialog.askdirectory(initialdir=self.path["DIR"] if self.path["DIR"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.output_path.delete(0, tk.END) self.output_path.insert(0, os.path.abspath(path)) self.path["DIR"] = path dir = C_Dossier() if dir.existe_dossier(Chemin=path): self.reset_error3() return # Function to validate the path entry def validate_path(self, path): if path.get() == "": return False file = C_File(file_name=path.get()) return file.existe_fichier() def validate_dir(self, path): if path.get() == "": return False dir = C_Dossier() return dir.existe_dossier(Chemin=path.get()) def back(self): if self.tabview_generate_lists.grid_info(): tab = self.tabview_generate_lists else: tab = self.tabview_fill_bot if tab.get() == "Review & Submit": tab.set("Output Location") elif tab.get() == "Output Location": tab.set("Setup") return def back2(self): self.tabview_fill_bot.set("Setup") if self.tabview_fill_bot.grid_info() else self.tabview_generate_lists.set("Setup") return def select_frame_by_name(self, name): # set button color for selected button self.generate_list_menu.configure(fg_color=("gray75", "gray25") if name == "Generate Lists" else "transparent") self.fill_absence_menu.configure(fg_color=("gray75", "gray25") if name == "Fill Absence Bot" else "transparent") self.about_us_menu.configure(fg_color=("gray75", "gray25") if name == "About us" else "transparent") def generate_progress_bar(self, determinate=True): if self.progressbar_1 is None: 
self.progressbar_1 = customtkinter.CTkProgressBar(self.sidebar_frame, mode="determinate" if determinate == True else "indeterminate") state = True else: self.progressbar_1.configure(mode="determinate" if determinate == True else "indeterminate") state = False if determinate: self.progressbar_1.set(0) else: self.progressbar_1.start() if state: self.progressbar_1.grid(row=6, column=0, padx=20, pady=20, sticky="ew") else: self.progressbar_1.grid() def generate_list_menu_button_event(self): if self.try_again_generate != False: test = self.generate_list_menu.cget("fg_color") if test == ("gray75", "gray25"): self.tabview_generate_lists.set("Setup") return if self.try_again_generate == False: self.generate_list_menu = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="Generate Lists", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), anchor="w", command=self.generate_list_menu_button_event) self.generate_list_menu.grid(row=1, column=0, sticky="ew", pady=(20, 0)) self.fill_absence_menu = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="Fill Absence Bot", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), anchor="w", command=self.fill_absence_button_event ) self.fill_absence_menu.grid(row=2, column=0, sticky="ew") self.about_us_menu = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="About us", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), anchor="w", command=self.about_us_button_event ) self.about_us_menu.grid(row=3, column=0, sticky="ew") # end of side bar # generate lists page self.tabview_generate_lists = customtkinter.CTkTabview(self, width=250, state='disabled', text_color_disabled='white', height=250) self.tabview_generate_lists.grid(row=0, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") self.tabview_generate_lists.add("Setup") self.tabview_generate_lists.add("Output Location") self.tabview_generate_lists.add("Review & Submit") self.tabview_generate_lists.tab("Setup").grid_columnconfigure(0, weight=1) # setup tab self.tabview_generate_lists.tab("Setup").grid_rowconfigure(0, weight=1) self.tabview_generate_lists.tab("Setup").grid_columnconfigure(0, weight=1) # data entry # check if file exist paths = C_File(file_name=resource_path("db/paths.txt")) self.path={} if paths.existe_fichier(): self.paths = paths.fichier_to_Liste() for path in self.paths: path_splited = path.split("=") self.path[path_splited[0]]=path_splited[-1].strip() self.data_entry_frame = customtkinter.CTkFrame(self.tabview_generate_lists.tab("Setup")) self.data_entry_frame.grid(sticky='nw', row=0, column=0, padx=5, pady=(0, 0)) self.label_data_file = customtkinter.CTkLabel(self.data_entry_frame, text="Data File (.xls):", text_color="gray90") self.label_data_file.grid(row=0, column=0, padx=(0, 5), pady=(15, 0)) self.entry_path = customtkinter.CTkEntry(self.data_entry_frame, placeholder_text="C:\\", validate='focusout', validatecommand=((), '%P'), width=250) self.entry_path.grid(row=0, column=1, padx=(100, 5), pady=(15, 0)) self.browse_button = customtkinter.CTkButton(self.data_entry_frame, text="Browse", command=self.browse_path, width=50) self.browse_button.grid(row=0, column=2, padx=(0, 5), pady=(15, 0)) self.label_template_entry = customtkinter.CTkLabel(self.data_entry_frame, text="Template file (.xlsx):") self.label_template_entry.grid(row=1, column=0, 
padx=(0, 5), pady=(15, 0)) self.entry_path2 = customtkinter.CTkEntry(self.data_entry_frame, placeholder_text="C:\\", validate='focusout', width=250) self.entry_path2.grid(row=1, column=1, padx=(100, 5), pady=(15, 10)) self.browse_button2 = customtkinter.CTkButton(self.data_entry_frame, text="Browse", command=self.browse_path2, width=50) self.browse_button2.grid(row=1, column=2, padx=(0, 5), pady=(15, 10)) if self.path["DATA"] != "": self.entry_path.insert(0, self.path["DATA"]) if self.path["TEMPLATE"] != "": self.entry_path2.insert(0, self.path["TEMPLATE"]) self.class_type_options_frame = customtkinter.CTkFrame(self.tabview_generate_lists.tab("Setup"), fg_color="gray25", height=100) self.class_type_options_frame.grid(sticky="nsew", row=2, column=0, columnspan=6, padx=10, pady=(20, 20)) # self.error_label = customtkinter.CTkLabel(self.class_type_options_frame, text="You have to choose atlease one class", text_color="black") # self.error_label.grid(row=0, column=0, padx=(0,0)) self.label_college = customtkinter.CTkLabel(self.class_type_options_frame, text="College Classes") self.label_college.grid(row=0, column=0, padx=(0, 0)) self.college_options = customtkinter.CTkSwitch(self.class_type_options_frame, text="College", state="switched", command=self.college_switch) self.college_options.select() self.college_options.grid(row=1, column=0, padx=(0, 0)) self.college_inter = customtkinter.CTkCheckBox(self.class_type_options_frame, text="APIC", state="normal", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.college_inter.grid(row=2, column=0, padx=(20, 0), pady=(10, 0), sticky="n") self.college_generale = customtkinter.CTkCheckBox(self.class_type_options_frame, text="ASCG", state="normal", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.college_generale.grid(row=2, column=1, padx=(0, 0), pady=(10, 0), sticky="n") self.college_aspeb = customtkinter.CTkCheckBox(self.class_type_options_frame, text="ASCPEB", state="normal", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.college_aspeb.grid(row=3, column=0, padx=(20, 0), pady=(5, 5), sticky="n") self.label_high_school = customtkinter.CTkLabel(self.class_type_options_frame, text="High School Classes", anchor="e") self.label_high_school.grid(row=0, column=2, padx=(100, 0)) self.high_school_options = customtkinter.CTkSwitch(self.class_type_options_frame, text="High School", state="switched", command=self.high_school_switch) # self.high_school_options.select() self.high_school_options.grid(row=1, column=2, padx=(80, 0)) self.TCS = customtkinter.CTkCheckBox(self.class_type_options_frame, text="TCS", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.TCS.grid(row=2, column=2, padx=(100, 0), pady=(5, 0), sticky="nsew") self.TCSF = customtkinter.CTkCheckBox(self.class_type_options_frame, text="TCSF", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.TCSF.grid(row=2, column=3, padx=(0, 0), pady=(5, 0), sticky="nsew") self.TCLSH = customtkinter.CTkCheckBox(self.class_type_options_frame, text="TCLSH", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.TCLSH.grid(row=3, column=2, padx=(100, 0), pady=(5, 5), sticky="nsew") self.BACSE = customtkinter.CTkCheckBox(self.class_type_options_frame, text="1BACSE", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) 
self.BACSE.grid(row=3, column=3, padx=(0, 0), pady=(5, 5), sticky="nsew") self.BACSH = customtkinter.CTkCheckBox(self.class_type_options_frame, text="1BACSH", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.BACSH.grid(row=3, column=4, padx=(0, 0), pady=(5, 5), sticky="nsew") self.BACSC = customtkinter.CTkCheckBox(self.class_type_options_frame, text="2BACSC", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.BACSC.grid(row=3, column=5, padx=(0, 0), pady=(5, 5), sticky="nsew") self.BACSH2 = customtkinter.CTkCheckBox(self.class_type_options_frame, text="2BACSH", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.BACSH2.grid(row=2, column=4, padx=(0, 0), pady=(5, 0), sticky="nsew") self.BACSVT = customtkinter.CTkCheckBox(self.class_type_options_frame, text="2BACSVT", state="disabled", checkbox_width=20, checkbox_height=20, command=self.reset_label_high_college) self.BACSVT.grid(row=2, column=5, padx=(0, 0), pady=(5, 0), sticky="nsew") self.submit = customtkinter.CTkButton(self.tabview_generate_lists.tab("Setup"), text="Next", command=self.go_to_output_location, width=50) self.submit.grid(row=6, column=5, padx=10, pady=(5, 5)) self.return_btn = customtkinter.CTkButton(self.tabview_generate_lists.tab("Setup"), text="Exit", width=50, fg_color="gray30", command=self.exit) self.return_btn.grid(row=6, column=4, padx=10, pady=(5, 5)) # output location tab self.tabview_generate_lists.tab("Output Location").grid_rowconfigure(0, weight=1) self.tabview_generate_lists.tab("Output Location").grid_columnconfigure((0, 1, 2), weight=1) self.output_location_frame = customtkinter.CTkFrame(self.tabview_generate_lists.tab("Output Location"), height=200) self.output_location_frame.grid(sticky='nw', row=0, column=0, padx=5, pady=(20, 0)) self.label_output_folder = customtkinter.CTkLabel(self.output_location_frame, text="Output Folder") self.label_output_folder.grid(row=0, column=0, padx=(0, 5), pady=(15, 0)) self.output_path = customtkinter.CTkEntry(self.output_location_frame, placeholder_text=self.path["DIR"] if self.path["DIR"] != "" else os.path.join(os.path.expanduser('~'), 'Documents'), validate='focusout', width=250) self.output_path.insert("0", str(self.path["DIR"] if self.path["DIR"] != "" else os.path.join(os.path.expanduser('~'), 'Documents'))) self.output_path.grid(row=0, column=1, padx=(100, 5), pady=(15, 0)) self.browse_button3 = customtkinter.CTkButton(self.output_location_frame, text="Browse", command=self.browse_folder, width=50) self.browse_button3.grid(row=0, column=2, padx=(0, 5), pady=(15, 0)) self.submit2 = customtkinter.CTkButton(self.tabview_generate_lists.tab("Output Location"), text="Next", command=self.go_to_output_location, width=50) self.submit2.grid(row=3, column=5, padx=10, pady=(5, 5)) self.return_btn2 = customtkinter.CTkButton(self.tabview_generate_lists.tab("Output Location"), text="Back", command=self.back, width=50, fg_color="gray30") self.return_btn2.grid(row=3, column=4, padx=10, pady=(5, 5)) # review tab self.tabview_generate_lists.tab("Review & Submit").grid_rowconfigure(0, weight=1) self.tabview_generate_lists.tab("Review & Submit").grid_columnconfigure((0, 1, 2), weight=1) self.submit3 = customtkinter.CTkButton(self.tabview_generate_lists.tab("Review & Submit"), text="Submit", command=self.generate_absence_file, width=60) self.submit3.grid(row=4, column=5, padx=10, pady=(5, 5)) self.return_btn3 = 
customtkinter.CTkButton(self.tabview_generate_lists.tab("Review & Submit"), text="Back", command=self.back, width=50, fg_color="gray30") self.return_btn3.grid(row=4, column=4, padx=10, pady=(5, 5)) self.select_frame_by_name("Generate Lists") self.try_again_generate = True else: self.tabview_fill_bot.grid_remove() self.tabview_generate_lists.grid() if not self.console_text.grid_info(): self.console_text.grid() if self.about_us_text is not None: self.about_us_text.grid_remove() self.about_us_logo.grid_remove() self.select_frame_by_name("Generate Lists") def entry_error(self, entry): entry.configure(border_color="red") def entry_reset(self, entry): entry.configure(border_color=self.entry_default_bordercolor) def error_label(self, label): current_text = label.cget("text") label.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label(self, label): current_text = label.cget("text") label.configure(text=current_text.replace("*", ""), text_color="gray90") return def validate_email_entry(self): email = self.email_entry.get() is_valid = validate_email(email) if is_valid: self.reset_label(self.label_email_entry) self.entry_reset(self.email_entry) else: self.error_label(self.label_email_entry) self.entry_error(self.email_entry) def check_terms_box(self): if self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="gray72", text_color="gray72") self.reset_label(self.label_terms) else: self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) def fill_absence_button_event(self): test = self.fill_absence_menu.cget("fg_color") if test == ("gray75", "gray25"): self.tabview_fill_bot.set("Setup") return if self.try_again_fill == False: self.tabview_fill_bot = customtkinter.CTkTabview(self, width=250, state='disabled', text_color_disabled='white', height=250) self.tabview_fill_bot.grid(row=0, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") self.tabview_fill_bot.add("Setup") self.tabview_fill_bot.add("Review & Submit") # setup tab self.tabview_fill_bot.tab("Setup").grid_rowconfigure(0, weight=1) self.tabview_fill_bot.tab("Setup").grid_columnconfigure(0, weight=1) self.tabview_fill_bot.tab("Review & Submit").grid_rowconfigure(0, weight=1) self.tabview_fill_bot.tab("Review & Submit").grid_columnconfigure((0, 1, 2), weight=1) # self.generate_list_menu_button_event() self.tabview_fill_bot.set("Setup") # self.submit.destroy() # self.return_btn.destroy() self.data_entry_frame = customtkinter.CTkFrame(self.tabview_fill_bot.tab("Setup")) self.data_entry_frame.grid(sticky='nw', row=0, column=0, padx=5, pady=(0, 0)) self.label_email_entry = customtkinter.CTkLabel(self.data_entry_frame, text="Email:", text_color="gray90") self.label_email_entry.grid(row=0, column=0, padx=(0, 5), pady=(15, 0)) self.email_entry = customtkinter.CTkEntry(self.data_entry_frame, placeholder_text="[email protected]", width=250) self.email_entry.grid(row=0, column=1, padx=(100, 5), pady=(15, 0)) if self.path["EMAIL"] != "": self.email_entry.insert(0, self.path["EMAIL"]) self.email_entry.bind("<KeyRelease>", lambda _ : self.validate_email_entry()) self.label_password_entry = customtkinter.CTkLabel(self.data_entry_frame, text="Password:") self.label_password_entry.grid(row=1, column=0, padx=(0, 5), pady=(15, 0)) self.password_entry = customtkinter.CTkEntry(self.data_entry_frame, show="*" ,placeholder_text="Your Password", width=250) self.password_entry.grid(row=1, column=1, padx=(100, 5), pady=(15, 0)) 
self.password_entry.bind("<KeyRelease>", lambda _ : (self.reset_label(self.label_password_entry), self.entry_reset(self.password_entry)) if len(self.password_entry.get()) > 8 else (self.error_label(self.label_password_entry), self.entry_error(self.password_entry))) self.label_absence_data_file = customtkinter.CTkLabel(self.data_entry_frame, text="Absence File (.xlsx):", text_color="gray90") self.label_absence_data_file.grid(row=2, column=0, padx=(0, 5), pady=(15, 0)) self.entry_path_absence = customtkinter.CTkEntry(self.data_entry_frame, placeholder_text=self.path["ABSENCE_FILE"] if self.path["ABSENCE_FILE"] != "" else "C://", validate='focusout', validatecommand=((), '%P'), width=250) self.entry_path_absence.grid(row=2, column=1, padx=(100, 5), pady=(15, 0)) if self.path["ABSENCE_FILE"] != "": self.entry_path_absence.insert(0, self.path["ABSENCE_FILE"]) self.browse_button_absence = customtkinter.CTkButton(self.data_entry_frame, text="Browse", command=self.browser_path3, width=50) self.browse_button_absence.grid(row=2, column=2, padx=(0, 5), pady=(15,0)) self.label_browser_chrome_firefox = customtkinter.CTkLabel(self.data_entry_frame, text="Browser:", text_color="gray90") self.label_browser_chrome_firefox.grid(row=3, column=0, padx=(0, 5), pady=(15, 0)) self.browser_type = customtkinter.IntVar() self.chrome_radio = customtkinter.CTkRadioButton(self.data_entry_frame, text="Chrome", variable=self.browser_type, value=1, state="disabled") self.chrome_radio.grid(row=3, column=1, padx=(10, 5), pady=(15, 0)) self.firefox_radio = customtkinter.CTkRadioButton(self.data_entry_frame, text="Firefox", variable=self.browser_type, value=2) self.firefox_radio.grid(row=3, column=2, padx=(10, 5), pady=(15,0)) self.firefox_radio.select() self.label_terms = customtkinter.CTkLabel(self.data_entry_frame, text="Terms and conditions:", text_color="gray90") self.label_terms.grid(row=4, column=0, padx=(0, 5), pady=(20, 0)) self.check_terms_and_condition = customtkinter.CTkCheckBox(self.data_entry_frame, text="I accept the Terms and the Conditions", state="normal",checkbox_width=20, checkbox_height=20, command=self.check_terms_box) self.check_terms_and_condition.grid(row=4, column=1, padx=(0, 0), pady=(20,0), sticky="ne") self.submit4 = customtkinter.CTkButton(self.tabview_fill_bot.tab("Setup"), text="Next", command=self.go_to_review2, width=50) self.submit4.grid(row=6, column=5, padx=10, pady=(5, 5)) self.return_btn4 = customtkinter.CTkButton(self.tabview_fill_bot.tab("Setup"), text="Exit", width=50, fg_color="gray30", command=self.exit) self.return_btn4.grid(row=6, column=4, padx=10, pady=(5, 5)) self.run_bot = customtkinter.CTkButton(self.tabview_fill_bot.tab("Review & Submit"), text="Run", command=self.run_bot_interaction, width=50) self.run_bot.grid(row=6, column=5, padx=10, pady=(5, 5)) self.return_btn5 = customtkinter.CTkButton(self.tabview_fill_bot.tab("Review & Submit"), text="Back", command=self.back2, width=50, fg_color="gray30") self.return_btn5.grid(row=6, column=4, padx=10, pady=(5, 5)) if self.about_us_text is not None: self.about_us_text.grid_remove() self.about_us_logo.grid_remove() self.console_text.grid() self.try_again_fill = True self.select_frame_by_name("Fill Absence Bot") else: self.tabview_generate_lists.grid_remove() self.tabview_fill_bot.grid() self.console_text.grid() if self.about_us_text is not None: self.about_us_text.grid_remove() self.about_us_logo.grid_remove() self.select_frame_by_name("Fill Absence Bot") def about_us_button_event(self): if self.tabview_generate_lists.grid_info(): 
self.tabview_generate_lists.grid_remove() if self.tabview_fill_bot is not None: if self.tabview_fill_bot.grid_info(): self.tabview_fill_bot.grid_remove() if self.about_us_text is not None: self.about_us_text.grid() self.about_us_logo.grid() self.console_text.grid_remove() self.select_frame_by_name("About us") else: self.about_us_logo = customtkinter.CTkLabel(self, text="", image=self.about_us_image) self.about_us_logo.grid(row=0, column=1, padx=10, pady=10) self.about_us_text = customtkinter.CTkTextbox(self, height=200, wrap="word", font=("Arial", 18)) self.about_us_text.grid(row=1, column=1,rowspan=3, columnspan=6, padx=(20, 20), pady = (15, 20), sticky = "nsew") self.console_text.grid_remove() self.about_us_text.tag_config("Title", foreground="gray92") self.about_us_text.tag_config("subTitle", foreground="gray65") self.about_us_text.tag_config("Paragraph", foreground="gray50") # Content to be displayed # Insert the formatted text into the Text widget self.about_us_text.insert("end", "\n About Us", "LargeText") self.about_us_text.insert("end", "\n\nMassar Direction Sagoubot is a cutting-edge automation project designed to streamline and simplify the process of managing absence data for multiple classes within a web application. Our solution is meticulously crafted using modern technologies and software tools to optimize efficiency and save valuable time for teachers and administrators.\n", "Paragraph") self.about_us_text.insert("end", "\n\n Terms and Privacy", "Title") self.about_us_text.insert("end", "\n\nAccount Access", "subTitle") self.about_us_text.insert("end", "\nTo enhance your experience with Massar Direction Sagoubot, the application utilizes your account credentials to securely log in to the Massar website. Your privacy and security are of utmost importance to us. We ensure that your login information is encrypted and used solely for the purpose of automating absence data management.\n", "Paragraph") self.about_us_text.insert("end", "\n\nData Handling", "subTitle") self.about_us_text.insert("end", "\nYour data, specifically related to absence records and class information, is processed within the confines of the application to facilitate automation. We do not store or retain any of your personal data beyond the scope of improving application functionality.\n", "Paragraph") self.about_us_text.insert("end", "\n\nSecurity Measures", "subTitle") self.about_us_text.insert("end", "\nWe employ industry-standard security measures to safeguard your account information. This includes encryption protocols and best practices to prevent unauthorized access or misuse of your credentials.\n", "Paragraph") self.about_us_text.insert("end", "\n\nUser Consent", "subTitle") self.about_us_text.insert("end", "\nBy using Massar Direction Sagoubot, you consent to the utilization of your Massar account credentials for the sole purpose of automating absence data management. We prioritize transparency and security in handling your login information.\n", "Paragraph") self.about_us_text.insert("end", "\n\nQuestions or Concerns", "subTitle") self.about_us_text.insert("end", "\nIf you have any questions, concerns, or require further clarification regarding our terms, privacy practices, or the usage of your account information, please feel free to reach out to us at [email protected]. 
Your satisfaction and trust are our top priorities.\n", "Paragraph") self.about_us_text.configure(state="disabled") self.select_frame_by_name("About us") def exit(self): result = tk.messagebox.askokcancel("Confirmation", "Are you sure you want to exit?") if result: # If the user confirms app.quit() # backend functions def generate_absence_file(self): self.generate_progress_bar() self.submit3.configure(state="disabled") self.return_btn3.configure(state="disabled") self.console_text.configure(state="normal") self.label_all_review1.configure(text_color="gray35") def run_fill_all_class_sheets(): reader = Read_Db(input_file=self.entry_path.get(), template_file=self.entry_path2.get(), output_file=str(self.output_path.get()) + "\\absence.xlsx", required_classes=self.selected_classes, progress_bar=self.progressbar_1, console=self.console_text) reader.fill_all_class_sheets() time.sleep(3) self.submit3.configure(state="normal") self.return_btn3.configure(state="normal") self.progressbar_1.grid_remove() self.console_text.configure(state="disabled") self.label_all_review1.configure(text_color="gray70") thread = threading.Thread(target=run_fill_all_class_sheets) thread.start() return def run_bot_interaction(self): self.generate_progress_bar(determinate=False) self.console_text.configure(state="normal") self.run_bot.configure(state="disabled") self.return_btn5.configure(state="disabled") self.label_all_review2.configure(text_color="gray35") def run_fill_absence(): # loading the class here because of the .env file not getting refreshed interaction_object = Massar_Direction_Sagou(console=self.console_text) driver_test = interaction_object.main_interaction() if driver_test: interaction_object.get_list_page()
absence = Absence(driver=interaction_object.driver, console=self.console_text)
3
2023-10-29 18:10:27+00:00
16k
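The first record's GUI (the customtkinter code ending just above) keeps its window responsive by handing the slow spreadsheet work to a `threading.Thread` while a `CTkProgressBar` reports progress. Below is a minimal, self-contained sketch of that pattern; the window layout and the `do_work` / `start_job` names are hypothetical stand-ins rather than code from the record, and it assumes the `customtkinter` package is installed.

```python
# Minimal sketch: run a slow job off the Tk main loop so the window stays responsive.
# Assumes `pip install customtkinter`; all names here are illustrative, not from the record.
import threading
import time

import customtkinter


def do_work(progressbar, button):
    # Simulate a long job (e.g. filling spreadsheets) while updating the bar.
    for step in range(10):
        time.sleep(0.3)
        progressbar.set((step + 1) / 10)   # determinate mode: value in [0, 1]
    button.configure(state="normal")       # re-enable the trigger button when done


def start_job():
    run_button.configure(state="disabled")  # avoid double-starting the job
    progressbar.set(0)
    threading.Thread(target=do_work, args=(progressbar, run_button), daemon=True).start()


app = customtkinter.CTk()
app.title("Background job sketch")

progressbar = customtkinter.CTkProgressBar(app, mode="determinate")
progressbar.grid(row=0, column=0, padx=20, pady=20, sticky="ew")

run_button = customtkinter.CTkButton(app, text="Run", command=start_job)
run_button.grid(row=1, column=0, padx=20, pady=(0, 20))

app.mainloop()
```

Like the record, this sketch updates widgets directly from the worker thread; a stricter variant would marshal those updates back onto the main loop with `app.after`.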
hsma-programme/Teaching_DES_Concepts_Streamlit
pages/4_🏥_The_Full_Model.py
[ { "identifier": "add_logo", "path": "helper_functions.py", "snippet": "def add_logo():\n '''\n Add a logo at the top of the page navigation sidebar\n\n Approach written by blackary on\n https://discuss.streamlit.io/t/put-logo-and-title-above-on-top-of-page-navigation-in-sidebar-of-multipage-app/28213/5\n \n '''\n st.markdown(\n \"\"\"\n <style>\n [data-testid=\"stSidebarNav\"] {\n background-image: url(https://raw.githubusercontent.com/hsma-programme/Teaching_DES_Concepts_Streamlit/main/resources/hsma_logo_transparent_background_small.png);\n background-repeat: no-repeat;\n padding-top: 175px;\n background-position: 40px 30px;\n }\n [data-testid=\"stSidebarNav\"]::before {\n content: \"The DES Playground\";\n padding-left: 20px;\n margin-top: 50px;\n font-size: 30px;\n position: relative;\n top: 100px;\n }\n\n </style>\n \"\"\",\n unsafe_allow_html=True,\n )" }, { "identifier": "mermaid", "path": "helper_functions.py", "snippet": "def mermaid(code: str, height=600) -> None:\n components.html(\n f\"\"\"\n <link href='http://fonts.googleapis.com/css?family=Lexend' rel='stylesheet' type='text/css'>\n\n <pre class=\"mermaid\">\n {code}\n </pre>\n\n <script type=\"module\">\n import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';\n mermaid.initialize({{ startOnLoad: true }});\n </script>\n \"\"\",\n height=height\n )" }, { "identifier": "center_running", "path": "helper_functions.py", "snippet": "def center_running():\n \"\"\"\n Have the \"running man\" animation in the center of the screen instead of the top right corner.\n \"\"\"\n st.markdown(\"\"\"\n<style>\n\ndiv[class*=\"StatusWidget\"]{\n\n position: fixed;\n margin: auto;\n top: 50%;\n left: 50%;\n marginRight: \"0px\"\n width: 50%;\n scale: 2.75;\n opacity: 1\n}\n\n</style>\n\"\"\", \n unsafe_allow_html=True)" }, { "identifier": "Scenario", "path": "model_classes.py", "snippet": "class Scenario:\n '''\n Container class for scenario parameters/arguments\n\n Passed to a model and its process classes\n '''\n\n def __init__(self,\n random_number_set=1,\n n_triage=DEFAULT_N_TRIAGE,\n n_reg=DEFAULT_N_REG,\n n_exam=DEFAULT_N_EXAM,\n n_trauma=DEFAULT_N_TRAUMA,\n n_cubicles_1=DEFAULT_N_CUBICLES_1,\n n_cubicles_2=DEFAULT_N_CUBICLES_2,\n triage_mean=DEFAULT_TRIAGE_MEAN,\n reg_mean=DEFAULT_REG_MEAN,\n reg_var=DEFAULT_REG_VAR,\n exam_mean=DEFAULT_EXAM_MEAN,\n exam_var=DEFAULT_EXAM_VAR,\n trauma_mean=DEFAULT_TRAUMA_MEAN,\n trauma_treat_mean=DEFAULT_TRAUMA_TREAT_MEAN,\n trauma_treat_var=DEFAULT_TRAUMA_TREAT_VAR,\n non_trauma_treat_mean=DEFAULT_NON_TRAUMA_TREAT_MEAN,\n non_trauma_treat_var=DEFAULT_NON_TRAUMA_TREAT_VAR,\n non_trauma_treat_p=DEFAULT_NON_TRAUMA_TREAT_P,\n prob_trauma=DEFAULT_PROB_TRAUMA,\n arrival_df=NSPP_PATH,\n override_arrival_rate=OVERRIDE_ARRIVAL_RATE,\n manual_arrival_rate=MANUAL_ARRIVAL_RATE_VALUE,\n model=\"full\"\n ):\n '''\n Create a scenario to parameterise the simulation model\n\n Parameters:\n -----------\n random_number_set: int, optional (default=DEFAULT_RNG_SET)\n Set to control the initial seeds of each stream of pseudo\n random numbers used in the model.\n\n n_triage: int\n The number of triage cubicles\n\n n_reg: int\n The number of registration clerks\n\n n_exam: int\n The number of examination rooms\n\n n_trauma: int\n The number of trauma bays for stablisation\n\n n_cubicles_1: int\n The number of non-trauma treatment cubicles\n\n n_cubicles_2: int\n The number of trauma treatment cubicles\n\n triage_mean: float\n Mean duration of the triage distribution (Exponential)\n\n reg_mean: 
float\n Mean duration of the registration distribution (Lognormal)\n\n reg_var: float\n Variance of the registration distribution (Lognormal)\n\n exam_mean: float\n Mean of the examination distribution (Normal)\n\n exam_var: float\n Variance of the examination distribution (Normal)\n\n trauma_mean: float\n Mean of the trauma stabilisation distribution (Exponential)\n\n trauma_treat_mean: float\n Mean of the trauma cubicle treatment distribution (Lognormal)\n\n trauma_treat_var: float\n Variance of the trauma cubicle treatment distribution (Lognormal)\n\n non_trauma_treat_mean: float\n Mean of the non trauma treatment distribution\n\n non_trauma_treat_var: float\n Variance of the non trauma treatment distribution\n\n non_trauma_treat_p: float\n Probability non trauma patient requires treatment\n\n prob_trauma: float\n probability that a new arrival is a trauma patient.\n\n model: string\n What model to run. Default is full. \n Options are \"full\", \"simplest\", \"simple_with_branch\"\n '''\n # sampling\n self.random_number_set = random_number_set\n\n # store parameters for sampling\n self.triage_mean = triage_mean\n self.reg_mean = reg_mean\n self.reg_var = reg_var\n self.exam_mean = exam_mean\n self.exam_var = exam_var\n self.trauma_mean = trauma_mean\n self.trauma_treat_mean = trauma_treat_mean\n self.trauma_treat_var = trauma_treat_var\n self.non_trauma_treat_mean = non_trauma_treat_mean\n self.non_trauma_treat_var = non_trauma_treat_var\n self.non_trauma_treat_p = non_trauma_treat_p\n self.prob_trauma = prob_trauma\n self.manual_arrival_rate = manual_arrival_rate\n self.arrival_df = arrival_df\n self.override_arrival_rate = override_arrival_rate\n self.model = model\n\n self.init_sampling()\n\n # count of each type of resource\n self.init_resource_counts(n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2)\n\n def set_random_no_set(self, random_number_set):\n '''\n Controls the random sampling \n Parameters:\n ----------\n random_number_set: int\n Used to control the set of psuedo random numbers\n used by the distributions in the simulation.\n '''\n self.random_number_set = random_number_set\n self.init_sampling()\n\n def init_resource_counts(self, n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2):\n '''\n Init the counts of resources to default values...\n '''\n self.n_triage = n_triage\n self.n_reg = n_reg\n self.n_exam = n_exam\n self.n_trauma = n_trauma\n\n # non-trauma (1), trauma (2) treatment cubicles\n self.n_cubicles_1 = n_cubicles_1\n self.n_cubicles_2 = n_cubicles_2\n\n def init_sampling(self):\n '''\n Create the distributions used by the model and initialise \n the random seeds of each.\n '''\n # create random number streams\n rng_streams = np.random.default_rng(self.random_number_set)\n self.seeds = rng_streams.integers(0, 999999999, size=N_STREAMS)\n\n # create distributions\n\n # Triage duration\n self.triage_dist = Exponential(self.triage_mean,\n random_seed=self.seeds[0])\n\n # Registration duration (non-trauma only)\n self.reg_dist = Lognormal(self.reg_mean,\n np.sqrt(self.reg_var),\n random_seed=self.seeds[1])\n\n # Evaluation (non-trauma only)\n self.exam_dist = Normal(self.exam_mean,\n np.sqrt(self.exam_var),\n random_seed=self.seeds[2])\n\n # Trauma/stablisation duration (trauma only)\n self.trauma_dist = Exponential(self.trauma_mean,\n random_seed=self.seeds[3])\n\n # Non-trauma treatment\n self.nt_treat_dist = Lognormal(self.non_trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[4])\n\n # treatment of 
trauma patients\n self.treat_dist = Lognormal(self.trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[5])\n\n # probability of non-trauma patient requiring treatment\n self.nt_p_treat_dist = Bernoulli(self.non_trauma_treat_p,\n random_seed=self.seeds[6])\n\n # probability of non-trauma versus trauma patient\n self.p_trauma_dist = Bernoulli(self.prob_trauma,\n random_seed=self.seeds[7])\n\n # init sampling for non-stationary poisson process\n self.init_nspp()\n\n def init_nspp(self):\n\n # read arrival profile\n self.arrivals = pd.read_csv(NSPP_PATH) # pylint: disable=attribute-defined-outside-init\n self.arrivals['mean_iat'] = 60 / self.arrivals['arrival_rate']\n\n # maximum arrival rate (smallest time between arrivals)\n self.lambda_max = self.arrivals['arrival_rate'].max() # pylint: disable=attribute-defined-outside-init\n\n # thinning exponential\n if self.override_arrival_rate is True:\n\n self.arrival_dist = Exponential(self.manual_arrival_rate, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n else:\n self.arrival_dist = Exponential(60.0 / self.lambda_max, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n\n # thinning uniform rng\n self.thinning_rng = Uniform(low=0.0, high=1.0, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[9])" }, { "identifier": "multiple_replications", "path": "model_classes.py", "snippet": "def multiple_replications(scenario,\n rc_period=DEFAULT_RESULTS_COLLECTION_PERIOD,\n n_reps=5,\n return_detailed_logs=False):\n '''\n Perform multiple replications of the model.\n\n Params:\n ------\n scenario: Scenario\n Parameters/arguments to configurethe model\n\n rc_period: float, optional (default=DEFAULT_RESULTS_COLLECTION_PERIOD)\n results collection period. 
\n the number of minutes to run the model to collect results\n\n n_reps: int, optional (default=DEFAULT_N_REPS)\n Number of independent replications to run.\n\n Returns:\n --------\n pandas.DataFrame\n '''\n\n # if return_full_log:\n # results = [single_run(scenario,\n # rc_period,\n # random_no_set=(scenario.random_number_set)+rep,\n # return_full_log=True,\n # return_event_log=False)\n # for rep in range(n_reps)]\n\n # format and return results in a dataframe\n # df_results = pd.concat(reesults)\n # df_results.index = np.arange(1, len(df_results)+1)\n # df_results.index.name = 'rep'\n # return df_results\n # return results\n\n if return_detailed_logs:\n results = [{'rep': rep+1,\n 'results': single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep,\n return_detailed_logs=True)}\n # .assign(Rep=rep+1)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n\n return results\n # {\n # {df_results = [pd.concat(result) for result in results] }\n # }\n # return results\n\n results = [single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n df_results = pd.concat(results)\n df_results.index = np.arange(1, len(df_results)+1)\n df_results.index.name = 'rep'\n return df_results" }, { "identifier": "reshape_for_animations", "path": "output_animation_functions.py", "snippet": "def reshape_for_animations(full_event_log, every_x_minutes=10):\n minute_dfs = list()\n patient_dfs = list()\n\n for rep in range(1, max(full_event_log['rep'])+1):\n # print(\"Rep {}\".format(rep))\n # Start by getting data for a single rep\n filtered_log_rep = full_event_log[full_event_log['rep'] == rep].drop('rep', axis=1)\n pivoted_log = filtered_log_rep.pivot_table(values=\"time\", \n index=[\"patient\",\"event_type\",\"pathway\"], \n columns=\"event\").reset_index()\n\n for minute in range(10*60*24):\n # print(minute)\n # Get patients who arrived before the current minute and who left the system after the current minute\n # (or arrived but didn't reach the point of being seen before the model run ended)\n # When turning this into a function, think we will want user to pass\n # 'first step' and 'last step' or something similar\n # and will want to reshape the event log for this so that it has a clear start/end regardless\n # of pathway (move all the pathway stuff into a separate column?)\n\n # Think we maybe need a pathway order and pathway precedence column\n # But what about shared elements of each pathway?\n if minute % every_x_minutes == 0:\n\n try:\n current_patients_in_moment = pivoted_log[(pivoted_log['arrival'] <= minute) & \n (\n (pivoted_log['depart'] >= minute) |\n (pivoted_log['depart'].isnull() )\n )]['patient'].values\n except KeyError:\n current_patients_in_moment = None\n \n if current_patients_in_moment is not None:\n patient_minute_df = filtered_log_rep[filtered_log_rep['patient'].isin(current_patients_in_moment)]\n # print(len(patient_minute_df))\n # Grab just those clients from the filtered log (the unpivoted version)\n # Each person can only be in a single place at once, so filter out any events\n # that have taken place after the minute\n # then just take the latest event that has taken place for each client\n # most_recent_events_minute = patient_minute_df[patient_minute_df['time'] <= minute] \\\n # .sort_values('time', ascending=True) \\\n # .groupby(['patient',\"event_type\",\"pathway\"]) \\\n # .tail(1) \n\n most_recent_events_minute_ungrouped = 
patient_minute_df[patient_minute_df['time'] <= minute].reset_index() \\\n .sort_values(['time', 'index'], ascending=True) \\\n .groupby(['patient']) \\\n .tail(1) \n\n patient_dfs.append(most_recent_events_minute_ungrouped.assign(minute=minute, rep=rep))\n\n # Now count how many people are in each state\n # CHECK - I THINK THIS IS PROBABLY DOUBLE COUNTING PEOPLE BECAUSE OF THE PATHWAY AND EVENT TYPE. JUST JOIN PATHWAY/EVENT TYPE BACK IN INSTEAD?\n state_counts_minute = most_recent_events_minute_ungrouped[['event']].value_counts().rename(\"count\").reset_index().assign(minute=minute, rep=rep)\n \n minute_dfs.append(state_counts_minute)\n\n\n minute_counts_df = pd.concat(minute_dfs).merge(filtered_log_rep[['event','event_type', 'pathway']].drop_duplicates().reset_index(drop=True), on=\"event\")\n full_patient_df = pd.concat(patient_dfs).sort_values([\"rep\", \"minute\", \"event\"])\n\n # Add a final exit step for each client\n final_step = full_patient_df.sort_values([\"rep\", \"patient\", \"minute\"], ascending=True).groupby([\"rep\", \"patient\"]).tail(1)\n final_step['minute'] = final_step['minute'] + every_x_minutes\n final_step['event'] = \"exit\"\n # final_step['event_type'] = \"arrival_departure\"\n\n full_patient_df = full_patient_df.append(final_step)\n\n minute_counts_df_pivoted = minute_counts_df.pivot_table(values=\"count\", \n index=[\"minute\", \"rep\", \"event_type\", \"pathway\"], \n columns=\"event\").reset_index().fillna(0)\n\n minute_counts_df_complete = minute_counts_df_pivoted.melt(id_vars=[\"minute\", \"rep\",\"event_type\",\"pathway\"])\n\n return {\n \"minute_counts_df\": minute_counts_df,\n \"minute_counts_df_complete\": minute_counts_df_complete,\n \"full_patient_df\": full_patient_df.sort_values([\"rep\", \"minute\", \"event\"])\n \n }" }, { "identifier": "animate_activity_log", "path": "output_animation_functions.py", "snippet": "def animate_activity_log(\n full_patient_df,\n event_position_df,\n scenario,\n rep=1,\n plotly_height=900,\n plotly_width=None,\n wrap_queues_at=None,\n include_play_button=True,\n return_df_only=False,\n add_background_image=None,\n display_stage_labels=True,\n icon_and_text_size=24,\n override_x_max=None,\n override_y_max=None,\n time_display_units=None,\n setup_mode=False,\n frame_duration=400, #milliseconds\n frame_transition_duration=600 #milliseconds\n ):\n \"\"\"_summary_\n\n Args:\n full_patient_df (pd.Dataframe): \n \n event_position_dicts (pd.Dataframe): \n dataframe with three cols - event, x and y\n Can be more easily created by passing a list of dicts to pd.DataFrame\n list of dictionaries with one dicitionary per event type\n containing keys 'event', 'x' and 'y'\n This will determine the intial position of any entries in the animated log\n (think of it as the bottom right hand corner of any group of entities at each stage)\n\n scenario:\n Pass in an object that specifies the number of resources at different steps\n\n rep (int, optional): Defaults to 1.\n The replication of any model to include. Can only display one rep at a time, so will take\n the first rep if not otherwise specified. 
\n \n plotly_height (int, optional): Defaults to 900.\n\n Returns:\n Plotly fig object\n \"\"\" \n\n # Filter to only a single replication\n\n # TODO: Remove this from this function, and instead write a test\n # to ensure that no patient ID appears in multiple places at a single minute\n # and return an error if it does so\n # Move the step of ensuring there's only a single model run involved to outside\n # of this function as it's not really its job. \n\n full_patient_df = full_patient_df[full_patient_df['rep'] == rep].sort_values([\n 'event','minute','time'\n ])\n\n # full_patient_df['count'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n # .transform('count')\n \n # Order patients within event/minute/rep to determine their eventual position in the line\n full_patient_df['rank'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n .rank(method='first')\n\n full_patient_df_plus_pos = full_patient_df.merge(event_position_df, on=\"event\", how='left') \\\n .sort_values([\"rep\", \"event\", \"minute\", \"time\"])\n\n # Determine the position for any resource use steps\n resource_use = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type'] == \"resource_use\"].copy()\n resource_use['y_final'] = resource_use['y']\n resource_use['x_final'] = resource_use['x'] - resource_use['resource_id']*10\n\n # Determine the position for any queuing steps\n queues = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type']=='queue']\n queues['y_final'] = queues['y']\n queues['x_final'] = queues['x'] - queues['rank']*10\n\n # If we want people to wrap at a certain queue length, do this here\n # They'll wrap at the defined point and then the queue will start expanding upwards\n # from the starting row\n if wrap_queues_at is not None:\n queues['row'] = np.floor((queues['rank']) / (wrap_queues_at+1))\n queues['x_final'] = queues['x_final'] + (wrap_queues_at*queues['row']*10)\n queues['y_final'] = queues['y_final'] + (queues['row'] * 30)\n\n full_patient_df_plus_pos = pd.concat([queues, resource_use])\n\n # full_patient_df_plus_pos['icon'] = '🙍'\n\n individual_patients = full_patient_df['patient'].drop_duplicates().sort_values()\n \n # Recommend https://emojipedia.org/ for finding emojis to add to list\n # note that best compatibility across systems can be achieved by using \n # emojis from v12.0 and below - Windows 10 got no more updates after that point\n icon_list = [\n '🧔🏼', '👨🏿‍🦯', '👨🏻‍🦰', '🧑🏻', '👩🏿‍🦱', \n '🤰', '👳🏽', '👩🏼‍🦳', '👨🏿‍🦳', '👩🏼‍🦱', \n '🧍🏽‍♀️', '👨🏼‍🔬', '👩🏻‍🦰', '🧕🏿', '👨🏼‍🦽', \n '👴🏾', '👨🏼‍🦱', '👷🏾', '👧🏿', '🙎🏼‍♂️',\n '👩🏻‍🦲', '🧔🏾', '🧕🏻', '👨🏾‍🎓', '👨🏾‍🦲',\n '👨🏿‍🦰', '🙍🏼‍♂️', '🙋🏾‍♀️', '👩🏻‍🔧', '👨🏿‍🦽', \n '👩🏼‍🦳', '👩🏼‍🦼', '🙋🏽‍♂️', '👩🏿‍🎓', '👴🏻', \n '🤷🏻‍♀️', '👶🏾', '👨🏻‍✈️', '🙎🏿‍♀️', '👶🏻', \n '👴🏿', '👨🏻‍🦳', '👩🏽', '👩🏽‍🦳', '🧍🏼‍♂️', \n '👩🏽‍🎓', '👱🏻‍♀️', '👲🏼', '🧕🏾', '👨🏻‍🦯', \n '🧔🏿', '👳🏿', '🤦🏻‍♂️', '👩🏽‍🦰', '👨🏼‍✈️', \n '👨🏾‍🦲', '🧍🏾‍♂️', '👧🏼', '🤷🏿‍♂️', '👨🏿‍🔧', \n '👱🏾‍♂️', '👨🏼‍🎓', '👵🏼', '🤵🏿', '🤦🏾‍♀️',\n '👳🏻', '🙋🏼‍♂️', '👩🏻‍🎓', '👩🏼‍🌾', '👩🏾‍🔬',\n '👩🏿‍✈️', '🎅🏼', '👵🏿', '🤵🏻', '🤰'\n ]\n\n full_icon_list = icon_list * int(np.ceil(len(individual_patients)/len(icon_list)))\n\n full_icon_list = full_icon_list[0:len(individual_patients)]\n\n full_patient_df_plus_pos = full_patient_df_plus_pos.merge(\n pd.DataFrame({'patient':list(individual_patients),\n 'icon':full_icon_list}),\n on=\"patient\")\n\n if return_df_only:\n return full_patient_df_plus_pos\n\n if override_x_max is not None:\n x_max = override_x_max\n else:\n x_max = event_position_df['x'].max()*1.25\n\n if override_y_max is not 
None:\n y_max = override_x_max\n else:\n y_max = event_position_df['y'].max()*1.1\n\n # If we're displaying time as a clock instead of as units of whatever time our model\n # is working in, create a minute_display column that will display as a psuedo datetime\n \n # For now, it starts a few months after the current date, just to give the\n # idea of simulating some hypothetical future time. It might be nice to allow\n # the start point to be changed, particular if we're simulating something on\n # a larger timescale that includes a level of weekly or monthly seasonality.\n\n # We need to keep the original minute column in existance because it's important for sorting\n if time_display_units == \"dhm\":\n full_patient_df_plus_pos['minute'] = dt.date.today() + pd.DateOffset(days=165) + pd.TimedeltaIndex(full_patient_df_plus_pos['minute'], unit='m')\n # https://strftime.org/\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%d %B %Y\\n%H:%M')\n )\n full_patient_df_plus_pos['minute'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%Y-%m-%d %H:%M')\n )\n else:\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute']\n\n # full_patient_df_plus_pos['size'] = 24\n\n # We are effectively making use of an animated plotly express scatterploy\n # to do all of the heavy lifting\n # Because of the way plots animate in this, it deals with all of the difficulty\n # of paths between individual positions - so we just have to tell it where to put\n # people at each defined step of the process, and the scattergraph will move them\n\n fig = px.scatter(\n full_patient_df_plus_pos.sort_values('minute'),\n x=\"x_final\",\n y=\"y_final\",\n # Each frame is one step of time, with the gap being determined\n # in the reshape_for_animation function\n animation_frame=\"minute_display\",\n # Important to group by patient here\n animation_group=\"patient\",\n text=\"icon\",\n # Can't have colours because it causes bugs with\n # lots of points failing to appear\n #color=\"event\",\n hover_name=\"event\",\n hover_data=[\"patient\", \"pathway\", \"time\", \"minute\", \"resource_id\"],\n # The approach of putting in the people as symbols didn't work\n # Went with making emoji text labels instead - this works better!\n # But leaving in as a reminder that the symbol approach doens't work.\n #symbol=\"rep\",\n #symbol_sequence=[\"⚽\"],\n #symbol_map=dict(rep_choice = \"⚽\"),\n range_x=[0, x_max],\n range_y=[0, y_max],\n height=plotly_height,\n width=plotly_width,\n # This sets the opacity of the points that sit behind\n opacity=0\n # size=\"size\"\n )\n\n # Now add labels identifying each stage (optional - can either be used\n # in conjunction with a background image or as a way to see stage names\n # without the need to create a background image)\n if display_stage_labels:\n fig.add_trace(go.Scatter(\n x=[pos+10 for pos in event_position_df['x'].to_list()],\n y=event_position_df['y'].to_list(),\n mode=\"text\",\n name=\"\",\n text=event_position_df['label'].to_list(),\n textposition=\"middle right\",\n hoverinfo='none'\n ))\n\n # Update the size of the icons and labels\n # This is what determines the size of the individual emojis that \n # represent our people!\n fig.update_traces(textfont_size=icon_and_text_size)\n\n # Finally add in icons to indicate the available resources\n # Make an additional dataframe that has one row per resource type\n # Then, starting from the initial position, make 
that many large circles\n # make them semi-transparent or you won't see the people using them! \n events_with_resources = event_position_df[event_position_df['resource'].notnull()].copy()\n events_with_resources['resource_count'] = events_with_resources['resource'].apply(lambda x: getattr(scenario, x))\n\n events_with_resources = events_with_resources.join(events_with_resources.apply(\n lambda r: pd.Series({'x_final': [r['x']-(10*(i+1)) for i in range(r['resource_count'])]}), axis=1).explode('x_final'),\n how='right')\n\n # This just adds an additional scatter trace that creates large dots\n # that represent the individual resources\n fig.add_trace(go.Scatter(\n x=events_with_resources['x_final'].to_list(),\n # Place these slightly below the y position for each entity\n # that will be using the resource\n y=[i-10 for i in events_with_resources['y'].to_list()],\n mode=\"markers\",\n # Define what the marker will look like\n marker=dict(\n color='LightSkyBlue',\n size=15),\n opacity=0.8,\n hoverinfo='none'\n ))\n\n # Optional step to add a background image\n # This can help to better visualise the layout/structure of a pathway\n # Simple FOSS tool for creating these background images is draw.io\n # Ideally your queueing steps should always be ABOVE your resource use steps\n # as this then results in people nicely flowing from the front of the queue \n # to the next stage\n if add_background_image is not None:\n fig.add_layout_image(\n dict(\n source=add_background_image,\n xref=\"x domain\",\n yref=\"y domain\",\n x=1,\n y=1,\n sizex=1,\n sizey=1,\n xanchor=\"right\",\n yanchor=\"top\",\n sizing=\"stretch\",\n opacity=0.5,\n layer=\"below\")\n )\n\n # We don't need any gridlines or tickmarks for the final output, so remove\n # However, can be useful for the initial setup phase of the outputs, so give the \n # option to inlcude\n if not setup_mode:\n fig.update_xaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n fig.update_yaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n\n fig.update_layout(yaxis_title=None, xaxis_title=None, showlegend=False,\n # Increase the size of the play button and animation timeline\n sliders=[dict(currentvalue=dict(font=dict(size=35) ,\n prefix=\"\"))]\n )\n\n # You can get rid of the play button if desired\n # Was more useful in older versions of the function\n if not include_play_button:\n fig[\"layout\"].pop(\"updatemenus\")\n\n # Adjust speed of animation\n fig.layout.updatemenus[0].buttons[0].args[1]['frame']['duration'] = frame_duration\n fig.layout.updatemenus[0].buttons[0].args[1]['transition']['duration'] = frame_transition_duration\n\n return fig" } ]
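The `animate_activity_log` snippet above expects, besides the reshaped patient log and a `Scenario`, an `event_position_df` giving an anchor position per event: the docstring lists `event`, `x` and `y`, and the function body also reads a `label` column plus, for resource-use steps, a `resource` column naming the matching `Scenario` attribute. A purely illustrative example of that table is below; the event names and coordinates are placeholders and would need to match the events the model actually logs.

```python
# Illustrative only: the shape of the positions table consumed by animate_activity_log.
import pandas as pd

event_position_df = pd.DataFrame([
    # event name,            x anchor, y anchor, label shown on plot,   Scenario attr (or None)
    {"event": "arrival",        "x": 50,  "y": 300, "label": "Arrival",       "resource": None},
    {"event": "wait_triage",    "x": 160, "y": 300, "label": "Queue: Triage", "resource": None},
    {"event": "triage_begins",  "x": 160, "y": 250, "label": "Triage",        "resource": "n_triage"},
    {"event": "depart",         "x": 270, "y": 300, "label": "Discharge",     "resource": None},
])
```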
import gc
import asyncio
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
import numpy as np
from helper_functions import add_logo, mermaid, center_running
from model_classes import Scenario, multiple_replications
from output_animation_functions import reshape_for_animations, animate_activity_log
11,011
st.subheader("Non-Trauma Treatment") n_cubicles_1 = st.slider("👨‍⚕️👩‍⚕️ Number of Treatment Cubicles for Non-Trauma", 1, 10, step=1, value=2) non_trauma_treat_p = st.slider("🤕 Probability that a non-trauma patient will need treatment", 0.0, 1.0, step=0.01, value=0.7, help="0 = No non-trauma patients need treatment\n\n1 = All non-trauma patients need treatment") col5, col6 = st.columns(2) with col5: st.write("Total rooms in use is {}".format(n_cubicles_1+n_cubicles_2+n_exam+n_trauma+n_triage+n_reg)) with col6: with st.expander("Advanced Parameters"): seed = st.slider("🎲 Set a random number for the computer to start from", 1, 1000, step=1, value=42) n_reps = st.slider("🔁 How many times should the simulation run? WARNING: Fast/modern computer required to take this above 5 replications.", 1, 10, step=1, value=3) run_time_days = st.slider("🗓️ How many days should we run the simulation for each time?", 1, 60, step=1, value=5) args = Scenario( random_number_set=seed, n_triage=n_triage, n_reg=n_reg, n_exam=n_exam, n_trauma=n_trauma, n_cubicles_1=n_cubicles_1, n_cubicles_2=n_cubicles_2, non_trauma_treat_p=non_trauma_treat_p, prob_trauma=prob_trauma) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): await asyncio.sleep(0.1) my_bar = st.progress(0, text="Simulating the minor injuries unit...") # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) my_bar.progress(40, text="Collating Simulation Outputs...") results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() my_bar.progress(60, text="Logging Results...") # print(len(st.session_state['session_results'])) # results_for_state = pd.DataFrame(results.median()).T.drop(['Rep'], axis=1) results_for_state = results original_cols = results_for_state.columns.values results_for_state['Triage\nCubicles'] = args.n_triage results_for_state['Registration\nClerks'] = args.n_reg results_for_state['Examination\nRooms'] = args.n_exam results_for_state['Non-Trauma\nTreatment Cubicles'] = args.n_cubicles_1 results_for_state['Trauma\nStabilisation Bays'] = args.n_trauma results_for_state['Trauma\nTreatment Cubicles'] = args.n_cubicles_2 results_for_state['Probability patient\nis a trauma patient'] = args.prob_trauma results_for_state['Probability non-trauma patients\nrequire treatment'] = args.non_trauma_treat_p results_for_state['Model Run'] = len(st.session_state['session_results']) + 1 results_for_state['Random Seed'] = seed # Reorder columns column_order = ['Model Run', 'Triage\nCubicles', 'Registration\nClerks', 'Examination\nRooms', 'Non-Trauma\nTreatment Cubicles', 'Trauma\nStabilisation Bays', 'Trauma\nTreatment Cubicles', 'Probability patient\nis a trauma patient', 'Probability non-trauma patients\nrequire treatment', 'Random Seed' ] + list(original_cols) results_for_state = results_for_state[column_order] current_state = st.session_state['session_results'] current_state.append(results_for_state) del results_for_state gc.collect() st.session_state['session_results'] = current_state del current_state gc.collect() # 
print(len(st.session_state['session_results'])) # UTILISATION AUDIT - BRING BACK WHEN NEEDED # full_utilisation_audit = pd.concat([detailed_outputs[i]['results']['utilisation_audit'].assign(Rep= i+1) # for i in range(n_reps)]) # animation_dfs_queue = reshape_for_animations( # full_event_log[ # (full_event_log['rep']==1) & # ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='arrival_departure')) # ] # ) my_bar.progress(80, text="Creating Animations...")
''' A Streamlit application based on Monks and Allows users to interact with an increasingly more complex treatment simulation ''' st.set_page_config( page_title="The Full Model", layout="wide", initial_sidebar_state="expanded", ) # Initialise session state if 'session_results' not in st.session_state: st.session_state['session_results'] = [] add_logo() center_running() with open("style.css") as css: st.markdown( f'<style>{css.read()}</style>' , unsafe_allow_html= True) ## We add in a title for our web app's page st.title("Discrete Event Simulation Playground") st.subheader("How can we optimise the full system?") st.markdown("Once you have run more than one scenario, try out the new tab 'compare scenario outputs'.") gc.collect() # tab1, tab2, tab3, tab4 = st.tabs(["Introduction", "Exercises", "Playground", "Compare Scenario Outputs"]) tab1, tab2, tab3, tab4 = st.tabs(["Playground", "Exercise", "Compare Scenario Outputs", "Information"]) with tab4: st.markdown(""" So now we have explored every component of the model: - Generating arrivals - Generating and using resources - Sending people down different paths So now let's create a version of the model that uses all of these aspects. For now, we won't consider nurses separately - we will assume that each nurse on shift has one room that is theirs to always use. """ ) mermaid(height=600, code= """ %%{ init: { 'flowchart': { 'curve': 'step' } } }%% %%{ init: { 'theme': 'base', 'themeVariables': {'lineColor': '#b4b4b4'} } }%% flowchart LR A[Arrival] --> BX[Triage] BX -.-> T([Triage Bay\n<b>RESOURCE</b>]) T -.-> BX BX --> BY{Trauma or non-trauma} BY ----> B1{Trauma Pathway} BY ----> B2{Non-Trauma Pathway} B1 --> C[Stabilisation] C --> E[Treatment] B2 --> D[Registration] D --> G[Examination] G --> H[Treat?] H ----> F H --> I[Non-Trauma Treatment] I --> F C -.-> Z([Trauma Room\n<b>RESOURCE</b>]) Z -.-> C E -.-> Y([Cubicle - 1\n<b>RESOURCE</b>]) Y -.-> E D -.-> X([Clerks\n<b>RESOURCE</b>]) X -.-> D G -.-> W([Exam Room\n<b>RESOURCE</b>]) W -.-> G I -.-> V([Cubicle - 2\n<b>RESOURCE</b>]) V -.-> I E ----> F[Discharge] classDef ZZ1 fill:#8B5E0F,font-family:lexend, color:#FFF classDef ZZ2 fill:#5DFDA0,font-family:lexend classDef ZZ2a fill:#02CD55,font-family:lexend, color:#FFF classDef ZZ3 fill: #D45E5E,font-family:lexend classDef ZZ3a fill: #932727,font-family:lexend, color:#FFF classDef ZZ4 fill: #611D67,font-family:lexend, color:#FFF classDef ZZ5 fill:#47D7FF,font-family:lexend classDef ZZ5a fill:#00AADA,font-family:lexend class A ZZ1 class C,E ZZ2 class D,G ZZ3 class X,W ZZ3a class Z,Y ZZ2a class I,V ZZ4 class BX ZZ5 class T ZZ5a ; """ ) with tab2: st.header("Things to Try") st.markdown( """ - First, just run the model with the default settings. - Look at the graphs and animated patient log. What is the performance of the system like? - Are the queues consistent throughout the day? --- - Due to building work taking place, the hospital will temporarily need to close several bays. It will be possible to have a maximum of 20 bays/cubicles/rooms in total across the whole system. - What is the best configuration you can find to keep the average wait times as low as possible across both trauma and non-trauma pathways? 
*Make sure you are using the default probabilities for trauma/non-trauma patients (0.3) and treatment of non-trauma patients (0.7)* """ ) with tab1: # n_triage: int # The number of triage cubicles # n_reg: int # The number of registration clerks # n_exam: int # The number of examination rooms # n_trauma: int # The number of trauma bays for stablisation # n_cubicles_1: int # The number of non-trauma treatment cubicles # n_cubicles_2: int # The number of trauma treatment cubicles # non_trauma_treat_p: float # Probability non trauma patient requires treatment # prob_trauma: float # probability that a new arrival is a trauma patient. col1, col2, col3, col4 = st.columns(4) with col1: st.subheader("Triage") n_triage = st.slider("👨‍⚕️👩‍⚕️ Number of Triage Cubicles", 1, 10, step=1, value=4) prob_trauma = st.slider("🚑 Probability that a new arrival is a trauma patient", 0.0, 1.0, step=0.01, value=0.3, help="0 = No arrivals are trauma patients\n\n1 = All arrivals are trauma patients") with col2: st.subheader("Trauma Pathway") n_trauma = st.slider("👨‍⚕️👩‍⚕️ Number of Trauma Bays for Stabilisation", 1, 10, step=1, value=6) n_cubicles_2 = st.slider("👨‍⚕️👩‍⚕️ Number of Treatment Cubicles for Trauma", 1, 10, step=1, value=6) with col3: st.subheader("Non-Trauma Pathway") n_reg = st.slider("👨‍⚕️👩‍⚕️ Number of Registration Cubicles", 1, 10, step=1, value=3) n_exam = st.slider("👨‍⚕️👩‍⚕️ Number of Examination Rooms for non-trauma patients", 1, 10, step=1, value=3) with col4: st.subheader("Non-Trauma Treatment") n_cubicles_1 = st.slider("👨‍⚕️👩‍⚕️ Number of Treatment Cubicles for Non-Trauma", 1, 10, step=1, value=2) non_trauma_treat_p = st.slider("🤕 Probability that a non-trauma patient will need treatment", 0.0, 1.0, step=0.01, value=0.7, help="0 = No non-trauma patients need treatment\n\n1 = All non-trauma patients need treatment") col5, col6 = st.columns(2) with col5: st.write("Total rooms in use is {}".format(n_cubicles_1+n_cubicles_2+n_exam+n_trauma+n_triage+n_reg)) with col6: with st.expander("Advanced Parameters"): seed = st.slider("🎲 Set a random number for the computer to start from", 1, 1000, step=1, value=42) n_reps = st.slider("🔁 How many times should the simulation run? 
WARNING: Fast/modern computer required to take this above 5 replications.", 1, 10, step=1, value=3) run_time_days = st.slider("🗓️ How many days should we run the simulation for each time?", 1, 60, step=1, value=5) args = Scenario( random_number_set=seed, n_triage=n_triage, n_reg=n_reg, n_exam=n_exam, n_trauma=n_trauma, n_cubicles_1=n_cubicles_1, n_cubicles_2=n_cubicles_2, non_trauma_treat_p=non_trauma_treat_p, prob_trauma=prob_trauma) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): await asyncio.sleep(0.1) my_bar = st.progress(0, text="Simulating the minor injuries unit...") # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) my_bar.progress(40, text="Collating Simulation Outputs...") results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() my_bar.progress(60, text="Logging Results...") # print(len(st.session_state['session_results'])) # results_for_state = pd.DataFrame(results.median()).T.drop(['Rep'], axis=1) results_for_state = results original_cols = results_for_state.columns.values results_for_state['Triage\nCubicles'] = args.n_triage results_for_state['Registration\nClerks'] = args.n_reg results_for_state['Examination\nRooms'] = args.n_exam results_for_state['Non-Trauma\nTreatment Cubicles'] = args.n_cubicles_1 results_for_state['Trauma\nStabilisation Bays'] = args.n_trauma results_for_state['Trauma\nTreatment Cubicles'] = args.n_cubicles_2 results_for_state['Probability patient\nis a trauma patient'] = args.prob_trauma results_for_state['Probability non-trauma patients\nrequire treatment'] = args.non_trauma_treat_p results_for_state['Model Run'] = len(st.session_state['session_results']) + 1 results_for_state['Random Seed'] = seed # Reorder columns column_order = ['Model Run', 'Triage\nCubicles', 'Registration\nClerks', 'Examination\nRooms', 'Non-Trauma\nTreatment Cubicles', 'Trauma\nStabilisation Bays', 'Trauma\nTreatment Cubicles', 'Probability patient\nis a trauma patient', 'Probability non-trauma patients\nrequire treatment', 'Random Seed' ] + list(original_cols) results_for_state = results_for_state[column_order] current_state = st.session_state['session_results'] current_state.append(results_for_state) del results_for_state gc.collect() st.session_state['session_results'] = current_state del current_state gc.collect() # print(len(st.session_state['session_results'])) # UTILISATION AUDIT - BRING BACK WHEN NEEDED # full_utilisation_audit = pd.concat([detailed_outputs[i]['results']['utilisation_audit'].assign(Rep= i+1) # for i in range(n_reps)]) # animation_dfs_queue = reshape_for_animations( # full_event_log[ # (full_event_log['rep']==1) & # ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='arrival_departure')) # ] # ) my_bar.progress(80, text="Creating Animations...")
animation_dfs_log = reshape_for_animations(
5
2023-10-26 09:57:52+00:00
16k
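Stripped of its Streamlit widgets, the pipeline this record builds is: construct a `Scenario`, run `multiple_replications` with detailed logging, collate the per-replication `summary_df` and `full_event_log` frames, then pass the event log to `reshape_for_animations`. A minimal headless sketch of that flow, assuming the repository's `model_classes` and `output_animation_functions` modules are importable, is:

```python
# Headless sketch of the pipeline the record wires into Streamlit widgets.
# Assumes model_classes.py and output_animation_functions.py from the repo are on the path.
import pandas as pd

from model_classes import Scenario, multiple_replications
from output_animation_functions import reshape_for_animations

n_reps = 3
run_time_days = 5

args = Scenario(
    random_number_set=42,
    n_triage=4, n_reg=3, n_exam=3, n_trauma=6,
    n_cubicles_1=2, n_cubicles_2=6,
    non_trauma_treat_p=0.7, prob_trauma=0.3,
)

# One entry per replication: {'rep': ..., 'results': {...}} when detailed logs are requested.
detailed_outputs = multiple_replications(
    args, n_reps=n_reps, rc_period=run_time_days * 60 * 24, return_detailed_logs=True,
)

results = pd.concat(
    [detailed_outputs[i]["results"]["summary_df"].assign(rep=i + 1) for i in range(n_reps)]
).set_index("rep")

full_event_log = pd.concat(
    [detailed_outputs[i]["results"]["full_event_log"].assign(rep=i + 1) for i in range(n_reps)]
)

# Down-sample the event log into 10-minute snapshots ready for the animated scatter plot.
animation_dfs = reshape_for_animations(full_event_log, every_x_minutes=10)
print(results.head())
```

The parameter values shown are simply the defaults the record's sliders start from (seed 42, 3 replications, 5 simulated days).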
hyperspy/exspy
exspy/models/edsmodel.py
[ { "identifier": "_get_element_and_line", "path": "exspy/misc/eds/utils.py", "snippet": "def _get_element_and_line(xray_line):\n \"\"\"\n Returns the element name and line character for a particular X-ray line as\n a tuple.\n\n By example, if xray_line = 'Mn_Ka' this function returns ('Mn', 'Ka')\n \"\"\"\n lim = xray_line.find(\"_\")\n if lim == -1:\n raise ValueError(f\"Invalid xray-line: {xray_line}\")\n return xray_line[:lim], xray_line[lim + 1 :]" }, { "identifier": "EDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class EDSSpectrum(Signal1D):\n \"\"\"General signal class for EDS spectra.\"\"\"\n\n _signal_type = \"EDS\"\n\n def __init__(self, *args, **kwards):\n super().__init__(*args, **kwards)\n if self.metadata.Signal.signal_type == \"EDS\":\n warnings.warn(\n \"The microscope type is not set. Use \"\n \"set_signal_type('EDS_TEM') \"\n \"or set_signal_type('EDS_SEM')\"\n )\n self.axes_manager.signal_axes[0].is_binned = True\n self._xray_markers = {}\n\n def _get_line_energy(self, Xray_line, FWHM_MnKa=None):\n \"\"\"\n Get the line energy and the energy resolution of a Xray line.\n\n The return values are in the same units than the signal axis\n\n Parameters\n ----------\n Xray_line : strings\n Valid element X-ray lines e.g. Fe_Kb\n FWHM_MnKa: {None, float, 'auto'}\n The energy resolution of the detector in eV\n if 'auto', used the one in\n 'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'\n\n Returns\n -------\n float: the line energy, if FWHM_MnKa is None\n (float,float): the line energy and the energy resolution, if FWHM_MnKa\n is not None\n \"\"\"\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if FWHM_MnKa == \"auto\":\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa\n )\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa\n )\n else:\n raise NotImplementedError(\n \"This method only works for EDS_TEM or EDS_SEM signals. \"\n \"You can use `set_signal_type('EDS_TEM')` or\"\n \"`set_signal_type('EDS_SEM')` to convert to one of these\"\n \"signal types.\"\n )\n line_energy = utils_eds._get_energy_xray_line(Xray_line)\n if units_name == \"eV\":\n line_energy *= 1000\n if FWHM_MnKa is not None:\n line_FWHM = (\n utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000\n )\n elif units_name == \"keV\":\n if FWHM_MnKa is not None:\n line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy)\n else:\n raise ValueError(\n f\"{units_name} is not a valid units for the energy axis. \"\n \"Only `eV` and `keV` are supported. \"\n \"If `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.axes_manager.signal_axes[0].units = 'keV' \\n\"\n )\n if FWHM_MnKa is None:\n return line_energy\n else:\n return line_energy, line_FWHM\n\n def _get_beam_energy(self):\n \"\"\"\n Get the beam energy.\n\n The return value is in the same units than the signal axis\n \"\"\"\n\n if \"Acquisition_instrument.SEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy\n elif \"Acquisition_instrument.TEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy\n else:\n raise AttributeError(\n \"The beam energy is not defined in `metadata`. 
\"\n \"Use `set_microscope_parameters` to set it.\"\n )\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if units_name == \"eV\":\n beam_energy *= 1000\n return beam_energy\n\n def _get_xray_lines_in_spectral_range(self, xray_lines):\n \"\"\"\n Return the lines in the energy range\n\n Parameters\n ----------\n xray_lines: List of string\n The xray_lines\n\n Return\n ------\n The list of xray_lines in the energy range\n \"\"\"\n ax = self.axes_manager.signal_axes[0]\n low_value = ax.low_value\n high_value = ax.high_value\n try:\n if self._get_beam_energy() < high_value:\n high_value = self._get_beam_energy()\n except AttributeError:\n # in case the beam energy is not defined in the metadata\n pass\n xray_lines_in_range = []\n xray_lines_not_in_range = []\n for xray_line in xray_lines:\n line_energy = self._get_line_energy(xray_line)\n if low_value < line_energy < high_value:\n xray_lines_in_range.append(xray_line)\n else:\n xray_lines_not_in_range.append(xray_line)\n return xray_lines_in_range, xray_lines_not_in_range\n\n def sum(self, axis=None, out=None, rechunk=False):\n if axis is None:\n axis = self.axes_manager.navigation_axes\n s = super().sum(axis=axis, out=out, rechunk=rechunk)\n s = out or s\n\n # Update live time by the change in navigation axes dimensions\n time_factor = np.prod(\n [ax.size for ax in self.axes_manager.navigation_axes]\n ) / np.prod([ax.size for ax in s.axes_manager.navigation_axes])\n aimd = s.metadata.get_item(\"Acquisition_instrument\", None)\n if aimd is not None:\n aimd = s.metadata.Acquisition_instrument\n if \"SEM.Detector.EDS.live_time\" in aimd:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"TEM.Detector.EDS.live_time\" in aimd:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and \"\n \"has not been updated.\"\n )\n\n if out is None:\n return s\n\n sum.__doc__ = Signal1D.sum.__doc__\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape,\n scale=scale,\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n aimd = m.metadata.Acquisition_instrument\n if \"Acquisition_instrument.SEM.Detector.EDS.real_time\" in m.metadata:\n aimd.SEM.Detector.EDS.real_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.real_time\" in m.metadata:\n aimd.TEM.Detector.EDS.real_time *= time_factor\n else:\n _logger.info(\n \"real_time could not be found in the metadata and has not been updated.\"\n )\n if \"Acquisition_instrument.SEM.Detector.EDS.live_time\" in m.metadata:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.live_time\" in m.metadata:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and has not been updated.\"\n )\n\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = BaseSignal.rebin.__doc__\n\n def set_elements(self, elements):\n \"\"\"Erase all elements and set them.\n\n Parameters\n ----------\n elements : list of strings\n A list of chemical element symbols.\n\n See also\n --------\n add_elements, set_lines, add_lines\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> 
print(s.metadata.Sample.elements)\n >>> s.set_elements(['Al'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al']\n\n \"\"\"\n # Erase previous elements and X-ray lines\n if \"Sample.elements\" in self.metadata:\n del self.metadata.Sample.elements\n self.add_elements(elements)\n\n def add_elements(self, elements):\n \"\"\"Add elements and the corresponding X-ray lines.\n\n The list of elements is stored in `metadata.Sample.elements`\n\n Parameters\n ----------\n elements : list of strings\n The symbol of the elements.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> print(s.metadata.Sample.elements)\n >>> s.add_elements(['Ar'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']\n\n See also\n --------\n set_elements, add_lines, set_lines\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a list. For example, \"\n \"if `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n if \"Sample.elements\" in self.metadata:\n elements_ = set(self.metadata.Sample.elements)\n else:\n elements_ = set()\n for element in elements:\n if element in elements_db:\n elements_.add(element)\n else:\n raise ValueError(f\"{element} is not a valid chemical element symbol.\")\n self.metadata.set_item(\"Sample.elements\", sorted(list(elements_)))\n\n def _get_xray_lines(self, xray_lines=None, only_one=None, only_lines=(\"a\",)):\n if xray_lines is None:\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n raise ValueError(\"Not X-ray line, set them with `add_elements`.\")\n return xray_lines\n\n def set_lines(self, lines, only_one=True, only_lines=(\"a\",)):\n \"\"\"Erase all Xrays lines and set them.\n\n See add_lines for details.\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. 
If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.set_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']\n\n See also\n --------\n add_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n del self.metadata.Sample.xray_lines\n self.add_lines(lines=lines, only_one=only_one, only_lines=only_lines)\n\n def add_lines(self, lines=(), only_one=True, only_lines=(\"a\",)):\n \"\"\"Add X-rays lines to the internal list.\n\n Although most functions do not require an internal list of\n X-ray lines because they can be calculated from the internal\n list of elements, ocassionally it might be useful to customize the\n X-ray lines to be use by all functions by default using this method.\n The list of X-ray lines is stored in\n `metadata.Sample.xray_lines`\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list. If the list is empty (default), and\n `metadata.Sample.elements` is\n defined, add the lines of all those elements.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_microscope_parameters(beam_energy=30)\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.add_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n See also\n --------\n set_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = set(self.metadata.Sample.xray_lines)\n else:\n xray_lines = set()\n # Define the elements which Xray lines has been customized\n # So that we don't attempt to add new lines automatically\n elements = set()\n for line in xray_lines:\n elements.add(line.split(\"_\")[0])\n for line in lines:\n try:\n element, subshell = line.split(\"_\")\n except ValueError:\n raise ValueError(\n \"Invalid line symbol. \"\n \"Please provide a valid line symbol e.g. 
Fe_Ka\"\n )\n if element in elements_db:\n elements.add(element)\n if subshell in elements_db[element][\"Atomic_properties\"][\"Xray_lines\"]:\n lines_len = len(xray_lines)\n xray_lines.add(line)\n if lines_len != len(xray_lines):\n _logger.info(f\"{line} line added,\")\n else:\n _logger.info(f\"{line} line already in.\")\n else:\n raise ValueError(f\"{line} is not a valid line of {element}.\")\n else:\n raise ValueError(f\"{element} is not a valid symbol of an element.\")\n xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]\n for xray in xray_not_here:\n warnings.warn(f\"{xray} is not in the data energy range.\", UserWarning)\n if \"Sample.elements\" in self.metadata:\n extra_elements = set(self.metadata.Sample.elements) - elements\n if extra_elements:\n new_lines = self._get_lines_from_elements(\n extra_elements, only_one=only_one, only_lines=only_lines\n )\n if new_lines:\n self.add_lines(list(new_lines) + list(lines))\n self.add_elements(elements)\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = xray_lines.union(self.metadata.Sample.xray_lines)\n self.metadata.Sample.xray_lines = sorted(list(xray_lines))\n\n def _get_lines_from_elements(self, elements, only_one=False, only_lines=(\"a\",)):\n \"\"\"Returns the X-ray lines of the given elements in spectral range\n of the data.\n\n Parameters\n ----------\n elements : list of strings\n A list containing the symbol of the chemical elements.\n only_one : bool\n If False, add all the lines of each element in the data spectral\n range. If True only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be returned.\n\n Returns\n -------\n list of X-ray lines alphabetically sorted\n\n \"\"\"\n\n only_lines = utils_eds._parse_only_lines(only_lines)\n try:\n beam_energy = self._get_beam_energy()\n except BaseException:\n # Fall back to the high_value of the energy axis\n beam_energy = self.axes_manager.signal_axes[0].high_value\n lines = []\n elements = [el if isinstance(el, str) else el.decode() for el in elements]\n for element in elements:\n # Possible line (existing and excited by electron)\n element_lines = []\n for subshell in list(\n elements_db[element][\"Atomic_properties\"][\"Xray_lines\"].keys()\n ):\n if only_lines and subshell not in only_lines:\n continue\n element_lines.append(element + \"_\" + subshell)\n element_lines = self._get_xray_lines_in_spectral_range(element_lines)[0]\n if only_one and element_lines:\n # Choose the best line\n select_this = -1\n element_lines.sort()\n for i, line in enumerate(element_lines):\n if self._get_line_energy(line) < beam_energy / 2:\n select_this = i\n break\n element_lines = [\n element_lines[select_this],\n ]\n\n if not element_lines:\n _logger.info(\n f\"There is no X-ray line for element {element} \"\n \"in the data spectral range\"\n )\n else:\n lines.extend(element_lines)\n lines.sort()\n return lines\n\n def _parse_xray_lines(self, xray_lines, only_one, only_lines):\n only_lines = utils_eds._parse_only_lines(only_lines)\n xray_lines = self._get_xray_lines(\n xray_lines, only_one=only_one, only_lines=only_lines\n )\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)\n for xray in xray_not_here:\n warnings.warn(\n f\"{xray} is not in the data energy range. 
\"\n \"You can remove it with: \"\n f\"`s.metadata.Sample.xray_lines.remove('{xray}')`\"\n )\n return xray_lines\n\n def get_lines_intensity(\n self,\n xray_lines=None,\n integration_windows=2.0,\n background_windows=None,\n plot_result=False,\n only_one=True,\n only_lines=(\"a\",),\n **kwargs,\n ):\n \"\"\"Return the intensity map of selected Xray lines.\n\n The intensities, the number of X-ray counts, are computed by\n suming the spectrum over the\n different X-ray lines. The sum window width\n is calculated from the energy resolution of the detector\n as defined in 'energy_resolution_MnKa' of the metadata.\n Backgrounds average in provided windows can be subtracted from the\n intensities.\n\n Parameters\n ----------\n xray_lines: {None, Iterable* of strings}\n If None,\n if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those.\n If `metadata.Sample.elements.xray_lines` is undefined\n or empty but `metadata.Sample.elements` is defined,\n use the same syntax as `add_line` to select a subset of lines\n for the operation.\n Alternatively, provide an iterable containing\n a list of valid X-ray lines symbols.\n * Note that while dictionaries and strings are iterable,\n their use is ambiguous and specifically not allowed.\n integration_windows: Float or array\n If float, the width of the integration windows is the\n 'integration_windows_width' times the calculated FWHM of the line.\n Else provide an array for which each row corresponds to a X-ray\n line. Each row contains the left and right value of the window.\n background_windows: None or 2D array of float\n If None, no background subtraction. Else, the backgrounds average\n in the windows are subtracted from the return intensities.\n 'background_windows' provides the position of the windows in\n energy. Each line corresponds to a X-ray line. In a line, the two\n first values correspond to the limits of the left window and the\n two last values correspond to the limits of the right window.\n plot_result : bool\n If True, plot the calculated line intensities. If the current\n object is a single spectrum it prints the result instead.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, use only the given lines.\n kwargs\n The extra keyword arguments for plotting. 
See\n `utils.plot.plot_signals`\n\n Returns\n -------\n intensities : list\n A list containing the intensities as BaseSignal subclasses.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)\n Mn_La at 0.63316 keV : Intensity = 96700.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows=2.1)\n >>> s.get_lines_intensity(['Mn_Ka'],\n >>> integration_windows=2.1, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 53597.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_elements(['Mn'])\n >>> s.set_lines(['Mn_Ka'])\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 46716.00\n\n See also\n --------\n set_elements, add_elements, estimate_background_windows,\n plot\n\n \"\"\"\n if xray_lines is not None and (\n not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict))\n ):\n raise TypeError(\n \"xray_lines must be a compatible iterable, but was \"\n f\"mistakenly provided as a {type(xray_lines)}.\"\n )\n\n xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n intensities = []\n ax = self.axes_manager.signal_axes[0]\n # test Signal1D (0D problem)\n # signal_to_index = self.axes_manager.navigation_dimension - 2\n for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):\n element, line = utils_eds._get_element_and_line(Xray_line)\n line_energy = self._get_line_energy(Xray_line)\n # Replace with `map` function for lazy large datasets\n img = self.isig[window[0] : window[1]].integrate1D(\n -1\n ) # integrate over window.\n if np.issubdtype(img.data.dtype, np.integer):\n # The operations below require a float dtype with the default\n # numpy casting rule ('same_kind')\n img.change_dtype(\"float\")\n if background_windows is not None:\n bw = background_windows[i]\n # TODO: test to prevent slicing bug. To be reomved when fixed\n indexes = [float(ax.value2index(de)) for de in list(bw) + window]\n if indexes[0] == indexes[1]:\n bck1 = self.isig[bw[0]]\n else:\n bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)\n if indexes[2] == indexes[3]:\n bck2 = self.isig[bw[2]]\n else:\n bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)\n corr_factor = (indexes[5] - indexes[4]) / (\n (indexes[1] - indexes[0]) + (indexes[3] - indexes[2])\n )\n img = img - (bck1 + bck2) * corr_factor\n img.metadata.General.title = (\n f\"X-ray line intensity of {self.metadata.General.title}: \"\n f\"{Xray_line} at {line_energy:.2f} \"\n f\"{self.axes_manager.signal_axes[0].units}\"\n )\n img = img.transpose(signal_axes=[])\n if plot_result and img.axes_manager.navigation_size == 1:\n if img._lazy:\n img.compute()\n print(\n f\"{Xray_line} at {line_energy} {ax.units} : \"\n f\"Intensity = {img.data[0]:.2f}\"\n )\n img.metadata.set_item(\"Sample.elements\", ([element]))\n img.metadata.set_item(\"Sample.xray_lines\", ([Xray_line]))\n intensities.append(img)\n if plot_result and img.axes_manager.navigation_size != 1:\n utils.plot.plot_signals(intensities, **kwargs)\n return intensities\n\n def get_take_off_angle(self):\n \"\"\"Calculate the take-off-angle (TOA).\n\n TOA is the angle with which the X-rays leave the surface towards\n the detector. 
Parameters are read in 'SEM.Stage.tilt_alpha',\n 'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and\n 'SEM.Detector.EDS.elevation_angle' and 'SEM.Stage.tilt_beta in\n 'metadata'.\n\n Returns\n -------\n take_off_angle: float\n in Degree\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_take_off_angle()\n 37.0\n >>> s.set_microscope_parameters(tilt_stage=20.)\n >>> s.get_take_off_angle()\n 57.0\n\n See also\n --------\n hs.eds.take_off_angle\n \"\"\"\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n mp = self.metadata.Acquisition_instrument.SEM\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n mp = self.metadata.Acquisition_instrument.TEM\n\n tilt_stage = mp.get_item(\"Stage.tilt_alpha\", None)\n azimuth_angle = mp.get_item(\"Detector.EDS.azimuth_angle\", None)\n elevation_angle = mp.get_item(\"Detector.EDS.elevation_angle\", None)\n beta_tilt = mp.get_item(\"Stage.tilt_beta\", 0.0)\n\n return utils_eds.take_off_angle(\n tilt_stage, azimuth_angle, elevation_angle, beta_tilt\n )\n\n def estimate_integration_windows(self, windows_width=2.0, xray_lines=None):\n \"\"\"\n Estimate a window of integration for each X-ray line.\n\n Parameters\n ----------\n windows_width: float\n The width of the integration windows is the 'windows_width' times\n the calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use 'metadata.Sample.elements.xray_lines'. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n integration_windows: 2D array of float\n The positions of the windows in energy. Each row corresponds to a\n X-ray line. Each row contains the left and right value of the\n window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> iw = s.estimate_integration_windows()\n >>> s.plot(integration_windows=iw)\n >>> s.get_lines_intensity(integration_windows=iw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 3710.00\n Pt_La at 9.4421 keV : Intensity = 15872.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n integration_windows = []\n for Xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa=\"auto\")\n element, line = utils_eds._get_element_and_line(Xray_line)\n det = windows_width * line_FWHM / 2.0\n integration_windows.append([line_energy - det, line_energy + det])\n return integration_windows\n\n def estimate_background_windows(\n self, line_width=[2, 2], windows_width=1, xray_lines=None\n ):\n \"\"\"\n Estimate two windows around each X-ray line containing only the\n background.\n\n Parameters\n ----------\n line_width: list of two floats\n The position of the two windows around the X-ray line is given by\n the `line_width` (left and right) times the calculated FWHM of the\n line.\n windows_width: float\n The width of the windows is is the `windows_width` times the\n calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use `metadata.Sample.elements.xray_lines`. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray line. 
In a line, the two first values correspond to the\n limits of the left window and the two last values correspond to\n the limits of the right window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 2754.00\n Pt_La at 9.4421 keV : Intensity = 15090.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n windows_position = []\n for xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(xray_line, FWHM_MnKa=\"auto\")\n tmp = [\n line_energy - line_FWHM * line_width[0] - line_FWHM * windows_width,\n line_energy - line_FWHM * line_width[0],\n line_energy + line_FWHM * line_width[1],\n line_energy + line_FWHM * line_width[1] + line_FWHM * windows_width,\n ]\n windows_position.append(tmp)\n windows_position = np.array(windows_position)\n # merge ovelapping windows\n index = windows_position.argsort(axis=0)[:, 0]\n for i in range(len(index) - 1):\n ia, ib = index[i], index[i + 1]\n if windows_position[ia, 2] > windows_position[ib, 0]:\n interv = np.append(windows_position[ia, :2], windows_position[ib, 2:])\n windows_position[ia] = interv\n windows_position[ib] = interv\n return windows_position\n\n def plot(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n navigator=\"auto\",\n plot_markers=True,\n autoscale=\"v\",\n norm=\"auto\",\n axes_manager=None,\n navigator_kwds={},\n **kwargs,\n ):\n \"\"\"Plot the EDS spectrum. The following markers can be added\n\n - The position of the X-ray lines and their names.\n - The background windows associated with each X-ray lines. A black line\n links the left and right window with the average value in each window.\n\n Parameters\n ----------\n xray_lines: {False, True, 'from_elements', list of string}\n If not False, indicate the position and the name of the X-ray\n lines.\n If True, if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those. If `metadata.Sample.elements.xray_lines`\n is undefined or empty or if xray_lines equals 'from_elements' and\n `metadata.Sample.elements` is defined, use the same syntax as\n `add_line` to select a subset of lines for the operation.\n Alternatively, provide an iterable containing a list of valid X-ray\n lines symbols.\n only_lines : None or list of strings\n If not None, use only the given lines (eg. ('a','Kb')).\n If None, use all lines.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n background_windows: None or 2D array of float\n If not None, add markers at the position of the windows in energy.\n Each line corresponds to a X-ray lines. In a line, the two first\n value corresponds to the limit of the left window and the two\n last values corresponds to the limit of the right window.\n integration_windows: None or 'auto' or float or 2D array of float\n If not None, add markers at the position of the integration\n windows.\n If 'auto' (or float), the width of the integration windows is 2.0\n (or float) times the calculated FWHM of the line. see\n 'estimate_integration_windows'.\n Else provide an array for which each row corresponds to a X-ray\n line. 
Each row contains the left and right value of the window.\n %s\n %s\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot()\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(True)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows='auto')\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw, integration_windows=2.1)\n\n See also\n --------\n set_elements, add_elements, estimate_integration_windows,\n get_lines_intensity, estimate_background_windows\n \"\"\"\n super().plot(\n navigator=navigator,\n plot_markers=plot_markers,\n autoscale=autoscale,\n norm=norm,\n axes_manager=axes_manager,\n navigator_kwds=navigator_kwds,\n **kwargs,\n )\n self._plot_xray_lines(\n xray_lines,\n only_lines,\n only_one,\n background_windows,\n integration_windows,\n render_figure=False,\n )\n self._render_figure(plot=[\"signal_plot\"])\n\n plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)\n\n def _plot_xray_lines(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n render_figure=True,\n ):\n if (\n xray_lines is not False\n or background_windows is not None\n or integration_windows is not None\n ):\n if xray_lines is False:\n xray_lines = True\n only_lines = utils_eds._parse_only_lines(only_lines)\n if xray_lines is True or xray_lines == \"from_elements\":\n if (\n \"Sample.xray_lines\" in self.metadata\n and xray_lines != \"from_elements\"\n ):\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n _logger.warning(\"No elements defined, set them with `add_elements`\")\n # No X-rays lines, nothing to do then\n return\n\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(\n xray_lines\n )\n for xray in xray_not_here:\n _logger.warning(f\"{xray} is not in the data energy range.\")\n\n xray_lines = np.unique(xray_lines)\n\n self.add_xray_lines_markers(xray_lines, render_figure=False)\n if background_windows is not None:\n self._add_background_windows_markers(\n background_windows, render_figure=False\n )\n if integration_windows is not None:\n if integration_windows == \"auto\":\n integration_windows = 2.0\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n self._add_vertical_lines_groups(\n integration_windows, linestyle=\"--\", render_figure=False\n )\n # Render figure only at the end\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_vertical_lines_groups(self, position, render_figure=True, **kwargs):\n \"\"\"\n Add vertical markers for each group that shares the color.\n\n Parameters\n ----------\n position: 2D array of float\n The position on the signal axis. 
Each row corresponds to a\n group.\n kwargs\n keywords argument for :py:class:`~.api.plot.markers.VerticalLine`\n \"\"\"\n colors = itertools.cycle(\n np.sort(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n\n for x, color in zip(position, colors):\n line = VerticalLines(offsets=x, color=color, **kwargs)\n self.add_marker(line, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def add_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Add marker on a spec.plot() with the name of the selected X-ray\n lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines\n \"\"\"\n if self._plot is None or not self._plot.is_active:\n raise RuntimeError(\"The signal needs to be plotted.\")\n norm = self._plot.signal_plot.ax_lines[0].norm\n minimum_intensity = self.data[self.data > 0].min() if norm == \"log\" else 0\n line_names = []\n segments = np.empty((len(xray_lines), 2, 2))\n offsets = np.empty((len(xray_lines), 2))\n # might want to set the intensity based on the alpha line intensity\n for i, xray_line in enumerate(xray_lines):\n element, line = utils_eds._get_element_and_line(xray_line)\n relative_factor = elements_db[element][\"Atomic_properties\"][\"Xray_lines\"][\n line\n ][\"weight\"]\n eng = self._get_line_energy(f\"{element}_{line}\")\n segments[i] = [[eng, 0], [eng, 1]]\n offsets[i] = [eng, 1]\n line_names.append(\n r\"$\\mathrm{%s}_{\\mathrm{%s}}$\"\n % utils_eds._get_element_and_line(xray_line)\n )\n\n line_markers = Lines(\n segments=segments,\n transform=\"relative\",\n color=\"black\",\n )\n text_markers = Texts(\n offsets=offsets,\n texts=line_names,\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.005,\n )\n\n self.add_marker(line_markers, render_figure=False)\n self.add_marker(text_markers, render_figure=False)\n\n # Connect events to remove the markers when the line is closed\n line_markers.events.closed.connect(self._xray_marker_closed)\n text_markers.events.closed.connect(self._xray_marker_closed)\n self._xray_markers[\"lines\"] = line_markers\n self._xray_markers[\"texts\"] = text_markers\n self._xray_markers[\"names\"] = xray_lines\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _xray_marker_closed(self, obj):\n self._xray_markers = {}\n\n def remove_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Remove marker previously added on a spec.plot() with the name of the\n selected X-ray lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines to remove\n render_figure: bool\n If True, render the figure after removing the markers\n \"\"\"\n ind = np.where(np.isin(self._xray_markers[\"names\"], xray_lines))\n self._xray_markers[\"lines\"].remove_items(ind)\n self._xray_markers[\"texts\"].remove_items(ind)\n self._xray_markers[\"names\"] = np.delete(self._xray_markers[\"names\"], ind)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_background_windows_markers(self, windows_position, render_figure=True):\n \"\"\"\n Plot the background windows associated with each X-ray lines.\n\n For X-ray lines, a black line links the left and right window with the\n average value in each window.\n\n Parameters\n ----------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray lines. 
In a line, the two first value corresponds to the\n limit of the left window and the two last values corresponds to the\n limit of the right window.\n\n See also\n --------\n estimate_background_windows, get_lines_intensity\n \"\"\"\n self._add_vertical_lines_groups(windows_position)\n ax = self.axes_manager.signal_axes[0]\n segments = []\n for bw in windows_position:\n # TODO: test to prevent slicing bug. To be removed when fixed\n if ax.value2index(bw[0]) == ax.value2index(bw[1]):\n y1 = self.isig[bw[0]].data\n else:\n y1 = self.isig[bw[0] : bw[1]].mean(-1).data\n if ax.value2index(bw[2]) == ax.value2index(bw[3]):\n y2 = self.isig[bw[2]].data\n else:\n y2 = self.isig[bw[2] : bw[3]].mean(-1).data\n x1 = (bw[0] + bw[1]) / 2.0\n x2 = (bw[2] + bw[3]) / 2.0\n segments.append([[x1, y1[0]], [x2, y2[0]]])\n segments = np.array(segments)\n lines = Lines(segments=segments, color=\"black\")\n self.add_marker(lines, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])" }, { "identifier": "elements", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "utils", "path": "exspy/misc/eds/utils.py", "snippet": "_ABSORPTION_CORRECTION_DOCSTRING = \"\"\"absorption_correction : numpy.ndarray or None\n If None (default), absorption correction is ignored, otherwise, the\n array must contain values between 0 and 1 to correct the intensities\n based on estimated absorption.\n\"\"\"\n Z = elements_db[element][\"General_properties\"][\"Z\"]\n A = elements_db[element][\"General_properties\"][\"atomic_weight\"]\ndef _get_element_and_line(xray_line):\ndef _get_energy_xray_line(xray_line):\ndef _get_xray_lines_family(xray_line):\ndef _parse_only_lines(only_lines):\ndef get_xray_lines_near_energy(energy, width=0.2, only_lines=None):\ndef get_FWHM_at_Energy(energy_resolution_MnKa, E):\ndef xray_range(xray_line, beam_energy, density=\"auto\"):\ndef electron_range(element, beam_energy, density=\"auto\", tilt=0):\ndef take_off_angle(tilt_stage, azimuth_angle, elevation_angle, beta_tilt=0.0):\ndef xray_lines_model(\n elements,\n beam_energy=200,\n weight_percents=None,\n energy_resolution_MnKa=130,\n energy_axis=None,\n):\ndef quantification_cliff_lorimer(\n intensities, kfactors, absorption_correction=None, mask=None\n):\ndef _quantification_cliff_lorimer(\n intensities, kfactors, absorption_correction, ref_index=0, ref_index2=1\n):\ndef quantification_zeta_factor(intensities, zfactors, dose, absorption_correction=None):\ndef get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle):\ndef quantification_cross_section(\n intensities, cross_sections, dose, absorption_correction=None\n):\ndef get_abs_corr_cross_section(\n composition, number_of_atoms, take_off_angle, probe_area\n):\ndef edx_cross_section_to_zeta(cross_sections, elements):\ndef zeta_to_edx_cross_section(zfactors, elements):" } ]
import warnings
import numpy as np
import math
import logging
import hyperspy.components1d as create_component
from hyperspy.misc.utils import stash_active_state
from exspy.misc.eds.utils import _get_element_and_line
from hyperspy.models.model1d import Model1D
from exspy.signals.eds import EDSSpectrum
from exspy.misc.elements import elements as elements_db
from exspy.misc.eds import utils as utils_eds
from hyperspy import utils
12074
# -*- coding: utf-8 -*-
# Copyright 2007-2023 The exSpy developers
#
# This file is part of exSpy.
#
# exSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# exSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>.

from __future__ import division

_logger = logging.getLogger(__name__)

eV2keV = 1000.0
sigma2fwhm = 2 * math.sqrt(2 * math.log(2))


def _get_weight(element, line, weight_line=None):
    if weight_line is None:
# -*- coding: utf-8 -*-
# Copyright 2007-2023 The exSpy developers
#
# This file is part of exSpy.
#
# exSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# exSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>.

from __future__ import division

_logger = logging.getLogger(__name__)

eV2keV = 1000.0
sigma2fwhm = 2 * math.sqrt(2 * math.log(2))


def _get_weight(element, line, weight_line=None):
    if weight_line is None:
weight_line = elements_db[element]["Atomic_properties"]["Xray_lines"][line][
1
2023-10-28 20:04:10+00:00
16k
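A quick illustration before the next record: the exspy excerpt above stops inside its _get_weight helper, right before the elements_db lookup shown as that record's final code line. The sketch below isolates that lookup pattern, using only the exspy import already listed in the record; the helper name _get_line_weight, the example arguments ("Mn", "Ka"), and the plain return value are illustrative assumptions, not exspy's verbatim implementation.

from exspy.misc.elements import elements as elements_db  # import taken from the record's import list


def _get_line_weight(element, line):
    # Illustrative helper (name is ours): return the tabulated relative
    # emission weight of an X-ray line, mirroring the
    # elements_db[element]["Atomic_properties"]["Xray_lines"][line]["weight"]
    # access that appears in the record's context snippets.
    return elements_db[element]["Atomic_properties"]["Xray_lines"][line]["weight"]


if __name__ == "__main__":
    # Example usage (requires exspy to be installed); Mn_Ka is one of the
    # X-ray lines referenced throughout the record above.
    print(_get_line_weight("Mn", "Ka"))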
Sllambias/yucca
yucca/training/augmentation/YuccaAugmentationComposer.py
[ { "identifier": "get_max_rotated_size", "path": "yucca/image_processing/matrix_ops.py", "snippet": "def get_max_rotated_size(patch_size):\n if len(patch_size) == 2:\n max_dim = int(np.sqrt(patch_size[0] ** 2 + patch_size[1] ** 2))\n return (max_dim, max_dim)\n\n max_dim_0 = max(\n int(np.sqrt(patch_size[0] ** 2 + patch_size[1] ** 2)),\n int(np.sqrt(patch_size[0] ** 2 + patch_size[2] ** 2)),\n )\n\n max_dim_1 = max(\n int(np.sqrt(patch_size[1] ** 2 + patch_size[0] ** 2)),\n int(np.sqrt(patch_size[1] ** 2 + patch_size[2] ** 2)),\n )\n\n max_dim_2 = max(\n int(np.sqrt(patch_size[2] ** 2 + patch_size[0] ** 2)),\n int(np.sqrt(patch_size[2] ** 2 + patch_size[1] ** 2)),\n )\n\n return (max_dim_0, max_dim_1, max_dim_2)" }, { "identifier": "AddBatchDimension", "path": "yucca/image_processing/transforms/formatting.py", "snippet": "class AddBatchDimension(YuccaTransform):\n def __init__(self, data_key=\"image\", label_key=\"label\"):\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n pass\n\n def __unsqueeze__(self, data, label):\n data = data[np.newaxis]\n if label is None:\n return data, label\n if isinstance(label, list):\n label = [s[np.newaxis] for s in label]\n else:\n label = label[np.newaxis]\n return data, label\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n data_dict[self.data_key], data_dict[self.label_key] = self.__unsqueeze__(\n data_dict[self.data_key], data_dict[self.label_key]\n )\n return data_dict" }, { "identifier": "RemoveBatchDimension", "path": "yucca/image_processing/transforms/formatting.py", "snippet": "class RemoveBatchDimension(YuccaTransform):\n def __init__(self, data_key=\"image\", label_key=\"label\"):\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n pass\n\n def __squeeze__(self, data, label):\n data = data[0]\n if isinstance(label, list):\n label = [s[0] for s in label]\n else:\n label = label[0]\n return data, label\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n data_dict[self.data_key], data_dict[self.label_key] = self.__squeeze__(\n data_dict[self.data_key], data_dict[self.label_key]\n )\n return data_dict" }, { "identifier": "BiasField", "path": "yucca/image_processing/transforms/BiasField.py", "snippet": "class BiasField(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_biasField\n biasField_p_per_sample\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __biasField__(self, imageVolume):\n if len(imageVolume.shape) == 3:\n x, y, z = imageVolume.shape\n X, Y, Z = np.meshgrid(\n np.linspace(0, x, x, endpoint=False),\n np.linspace(0, y, y, endpoint=False),\n np.linspace(0, z, z, endpoint=False),\n indexing=\"ij\",\n )\n x0 = np.random.randint(0, x)\n y0 = np.random.randint(0, y)\n z0 = np.random.randint(0, z)\n G = 1 - (np.power((X - x0), 2) / (x**2) + np.power((Y - y0), 2) / (y**2) + np.power((Z - z0), 2) / (z**2))\n else:\n x, y = imageVolume.shape\n X, Y = np.meshgrid(\n np.linspace(0, x, x, endpoint=False),\n np.linspace(0, y, y, endpoint=False),\n indexing=\"ij\",\n )\n x0 = np.random.randint(0, x)\n y0 = np.random.randint(0, y)\n G = 1 - (np.power((X - x0), 2) / (x**2) + np.power((Y - y0), 
2) / (y**2))\n return np.multiply(G, imageVolume)\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape. \\nShould be (b, c, x, y, z) or (b, c, x, y) and is:\\\n {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n data_dict[self.data_key][b, c] = self.__biasField__(data_dict[self.data_key][b, c])\n return data_dict" }, { "identifier": "Blur", "path": "yucca/image_processing/transforms/Blur.py", "snippet": "class Blur(YuccaTransform):\n \"\"\"\n WRAPPER FOR NNUNET AUGMENT GAMMA: https://github.com/MIC-DKFZ/batchgenerators/blob/8822a08a7dbfa4986db014e6a74b040778164ca6/batchgenerators/augmentations/color_augmentations.py\n\n Augments by changing 'gamma' of the image (same as gamma correction in photos or computer monitors\n\n :param gamma_range: range to sample gamma from. If one value is smaller than 1 and the other one is\n larger then half the samples will have gamma <1 and the other >1 (in the inverval that was specified).\n Tuple of float. If one value is < 1 and the other > 1 then half the images will be augmented with gamma values\n smaller than 1 and the other half with > 1\n :param invert_image: whether to invert the image before applying gamma augmentation\n :param retain_stats: Gamma transformation will alter the mean and std of the data in the patch. If retain_stats=True,\n the data will be transformed to match the mean and standard deviation before gamma augmentation. retain_stats\n can also be callable (signature retain_stats() -> bool)\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, p_per_channel=0.5, sigma=(0.5, 1.0)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.p_per_channel = p_per_channel\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma: Tuple[float]):\n sigma = np.random.uniform(*sigma)\n return sigma\n\n def __blur__(self, imageVolume, sigma):\n for c in range(imageVolume.shape[0]):\n if np.random.uniform() < self.p_per_channel:\n imageVolume[c] = gaussian_filter(imageVolume[c], sigma, order=0)\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n sigma = self.get_params(self.sigma)\n data_dict[self.data_key][b] = self.__blur__(data_dict[self.data_key][b], sigma)\n return data_dict" }, { "identifier": "CopyImageToSeg", "path": "yucca/image_processing/transforms/CopyImageToSeg.py", "snippet": "class CopyImageToSeg(YuccaTransform):\n \"\"\"\n variables in CopyImageToSeg\n data_key\n label_key\n\n \"\"\"\n\n def __init__(self, copy=False, data_key=\"image\", label_key=\"label\"):\n self.copy = copy\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __copy__(self, imageVolume):\n return imageVolume, imageVolume.copy()\n\n def 
__call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n if self.copy:\n data_dict[self.data_key], data_dict[self.label_key] = self.__copy__(data_dict[self.data_key])\n return data_dict" }, { "identifier": "Gamma", "path": "yucca/image_processing/transforms/Gamma.py", "snippet": "class Gamma(YuccaTransform):\n \"\"\"\n WRAPPER FOR NNUNET AUGMENT GAMMA: https://github.com/MIC-DKFZ/batchgenerators/blob/8822a08a7dbfa4986db014e6a74b040778164ca6/batchgenerators/augmentations/color_augmentations.py\n\n Augments by changing 'gamma' of the image (same as gamma correction in photos or computer monitors\n\n :param gamma_range: range to sample gamma from. If one value is smaller than 1 and the other one is\n larger then half the samples will have gamma <1 and the other >1 (in the inverval that was specified).\n Tuple of float. If one value is < 1 and the other > 1 then half the images will be augmented with gamma values\n smaller than 1 and the other half with > 1\n :param invert_image: whether to invert the image before applying gamma augmentation\n :param retain_stats: Gamma transformation will alter the mean and std of the data in the patch. If retain_stats=True,\n the data will be transformed to match the mean and standard deviation before gamma augmentation. retain_stats\n can also be callable (signature retain_stats() -> bool)\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n p_invert_image=0.05,\n gamma_range=(0.5, 2.0),\n per_channel=True,\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.gamma_range = gamma_range\n self.p_invert_image = p_invert_image\n self.per_channel = per_channel\n\n @staticmethod\n def get_params(p_invert_image):\n # No parameters to retrieve\n do_invert = False\n if np.random.uniform() < p_invert_image:\n do_invert = True\n return do_invert\n\n def __gamma__(self, imageVolume, gamma_range, invert_image, per_channel):\n return augment_gamma(imageVolume, gamma_range, invert_image, per_channel, retain_stats=False)\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n do_invert = self.get_params(self.p_invert_image)\n data_dict[self.data_key][b] = self.__gamma__(\n data_dict[self.data_key][b],\n self.gamma_range,\n do_invert,\n per_channel=self.per_channel,\n )\n return data_dict" }, { "identifier": "MotionGhosting", "path": "yucca/image_processing/transforms/Ghosting.py", "snippet": "class MotionGhosting(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_motionGhosting\n motionGhosting_p_per_sample\n motionGhosting_alpha\n motionGhosting_numReps\n motionGhosting_axes\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n alpha=(0.85, 0.95),\n numReps=(2, 5),\n axes=(0, 3),\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.alpha = alpha\n 
self.numReps = numReps\n self.axes = axes\n\n @staticmethod\n def get_params(alpha: Tuple[float], numReps: Tuple[float], axes: Tuple[float]) -> Tuple[float]:\n alpha = np.random.uniform(*alpha)\n numReps = np.random.randint(*numReps)\n axis = np.random.randint(*axes)\n return alpha, numReps, axis\n\n def __motionGhosting__(self, imageVolume, alpha, numReps, axis):\n if len(imageVolume.shape) == 3:\n assert axis in [0, 1, 2], \"Incorrect or no axis\"\n\n h, w, d = imageVolume.shape\n\n imageVolume = np.fft.fftn(imageVolume, s=[h, w, d])\n\n if axis == 0:\n imageVolume[0:-1:numReps, :, :] = alpha * imageVolume[0:-1:numReps, :, :]\n elif axis == 1:\n imageVolume[:, 0:-1:numReps, :] = alpha * imageVolume[:, 0:-1:numReps, :]\n else:\n imageVolume[:, :, 0:-1:numReps] = alpha * imageVolume[:, :, 0:-1:numReps]\n\n imageVolume = abs(np.fft.ifftn(imageVolume, s=[h, w, d]))\n if len(imageVolume.shape) == 2:\n assert axis in [0, 1], \"Incorrect or no axis\"\n h, w = imageVolume.shape\n imageVolume = np.fft.fftn(imageVolume, s=[h, w])\n\n if axis == 0:\n imageVolume[0:-1:numReps, :] = alpha * imageVolume[0:-1:numReps, :]\n else:\n imageVolume[:, 0:-1:numReps] = alpha * imageVolume[:, 0:-1:numReps]\n imageVolume = abs(np.fft.ifftn(imageVolume, s=[h, w]))\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n alpha, numReps, axis = self.get_params(self.alpha, self.numReps, self.axes)\n data_dict[self.data_key][b, c] = self.__motionGhosting__(\n data_dict[self.data_key][b, c], alpha, numReps, axis\n )\n return data_dict" }, { "identifier": "Masking", "path": "yucca/image_processing/transforms/Masking.py", "snippet": "class Masking(YuccaTransform):\n \"\"\"\n CURRENTLY NOT IMPLEMENTED\n \"\"\"\n\n def __init__(self, mask=False, data_key=\"image\", mask_ratio: tuple | float = 0.25):\n self.mask = mask\n self.data_key = data_key\n self.mask_ratio = mask_ratio\n\n @staticmethod\n def get_params(shape, ratio, start_idx):\n pass\n\n def __mask__(self, image, label, crop_start_idx):\n pass\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n if self.mask:\n raise NotImplementedError(\"Masking is not implemented yet. 
It should not be enabled\")\n return data_dict" }, { "identifier": "Mirror", "path": "yucca/image_processing/transforms/Mirror.py", "snippet": "class Mirror(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_multiplicativeNoise\n multiplicativeNoise_p_per_sample\n multiplicativeNoise_mean\n multiplicativeNoise_sigma\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n label_key=\"label\",\n p_per_sample=1,\n axes=(0, 1, 2),\n p_mirror_per_axis=0.33,\n skip_label=False,\n ):\n self.data_key = data_key\n self.label_key = label_key\n self.p_per_sample = p_per_sample\n self.p_mirror_per_axis = p_mirror_per_axis\n self.axes = axes\n self.skip_label = skip_label\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __mirror__(self, imageVolume, labelVolume, axes):\n # Input will be [c, x, y, z] or [c, x, y]\n if 0 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :] = imageVolume[:, ::-1]\n labelVolume[:, :] = labelVolume[:, ::-1]\n if 1 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :] = imageVolume[:, :, ::-1]\n labelVolume[:, :, :] = labelVolume[:, :, ::-1]\n if 2 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :, :] = imageVolume[:, :, :, ::-1]\n labelVolume[:, :, :, :] = labelVolume[:, :, :, ::-1]\n return imageVolume, labelVolume\n\n def __mirrorimage__(self, imageVolume, axes):\n # Input will be [c, x, y, z] or [c, x, y]\n if 0 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :] = imageVolume[:, ::-1]\n if 1 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :] = imageVolume[:, :, ::-1]\n if 2 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :, :] = imageVolume[:, :, :, ::-1]\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n if self.skip_label:\n data_dict[self.data_key][b] = self.__mirrorimage__(data_dict[self.data_key][b], self.axes)\n else:\n (\n data_dict[self.data_key][b],\n data_dict[self.label_key][b],\n ) = self.__mirror__(\n data_dict[self.data_key][b],\n data_dict[self.label_key][b],\n self.axes,\n )\n return data_dict" }, { "identifier": "AdditiveNoise", "path": "yucca/image_processing/transforms/Noise.py", "snippet": "class AdditiveNoise(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_additiveNoise\n additiveNoise_p_per_sample\n additiveNoise_mean\n additiveNoise_sigma\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, mean=(0.0, 0.0), sigma=(1e-3, 1e-4)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.mean = mean\n self.sigma = sigma\n\n @staticmethod\n def get_params(mean: Tuple[float], sigma: Tuple[float]) -> Tuple[float]:\n mean = float(np.random.uniform(*mean))\n sigma = float(np.random.uniform(*sigma))\n return mean, sigma\n\n def __additiveNoise__(self, imageVolume, mean, sigma):\n # J = I+n\n gauss = np.random.normal(mean, sigma, imageVolume.shape)\n return imageVolume + gauss\n\n def __call__(self, packed_data_dict=None, 
**unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (c, x, y, z) or (c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n mean, sigma = self.get_params(self.mean, self.sigma)\n if np.random.uniform() < self.p_per_sample:\n data_dict[self.data_key][b, c] = self.__additiveNoise__(data_dict[self.data_key][b, c], mean, sigma)\n return data_dict" }, { "identifier": "MultiplicativeNoise", "path": "yucca/image_processing/transforms/Noise.py", "snippet": "class MultiplicativeNoise(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_multiplicativeNoise\n multiplicativeNoise_p_per_sample\n multiplicativeNoise_mean\n multiplicativeNoise_sigma\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, mean=(0.0, 0.0), sigma=(1e-3, 1e-4)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.mean = mean\n self.sigma = sigma\n\n @staticmethod\n def get_params(mean: Tuple[float], sigma: Tuple[float]) -> Tuple[float]:\n mean = float(np.random.uniform(*mean))\n sigma = float(np.random.uniform(*sigma))\n return mean, sigma\n\n def __multiplicativeNoise__(self, imageVolume, mean, sigma):\n # J = I + I*n\n gauss = np.random.normal(mean, sigma, imageVolume.shape)\n return imageVolume + imageVolume * gauss\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n mean, sigma = self.get_params(self.mean, self.sigma)\n data_dict[self.data_key][b, c] = self.__multiplicativeNoise__(data_dict[self.data_key][b, c], mean, sigma)\n return data_dict" }, { "identifier": "GibbsRinging", "path": "yucca/image_processing/transforms/Ringing.py", "snippet": "class GibbsRinging(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_gibbsRinging\n gibbsRinging_p_per_sample\n gibbsRinging_cutFreq\n gibbsRinging_axes\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, cutFreq=(96, 129), axes=(0, 3)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.cutFreq = cutFreq\n self.axes = axes\n\n @staticmethod\n def get_params(cutFreq, axes):\n cutFreq = np.random.randint(*cutFreq)\n axis = np.random.randint(*axes)\n return cutFreq, axis\n\n def __gibbsRinging__(self, imageVolume, numSample, axis):\n if len(imageVolume.shape) == 3:\n assert axis in [0, 1, 2], \"Incorrect or no axis\"\n\n h, w, d = imageVolume.shape\n if axis == 0:\n imageVolume = imageVolume.transpose(0, 2, 1)\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, d, w]))\n imageVolume[:, :, 0 : int(np.ceil(w / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(w / 2) + np.ceil(numSample / 2)) : w] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, d, w]))\n imageVolume = imageVolume.transpose(0, 2, 1)\n elif axis == 1:\n imageVolume = 
imageVolume.transpose(1, 2, 0)\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[w, d, h]))\n imageVolume[:, :, 0 : int(np.ceil(h / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(h / 2) + np.ceil(numSample / 2)) : h] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[w, d, h]))\n imageVolume = imageVolume.transpose(2, 0, 1)\n else:\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, w, d]))\n imageVolume[:, :, 0 : int(np.ceil(d / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(d / 2) + np.ceil(numSample / 2)) : d] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, w, d]))\n elif len(imageVolume.shape) == 2:\n assert axis in [0, 1], \"incorrect or no axis\"\n h, w = imageVolume.shape\n if axis == 0:\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, w]))\n imageVolume[:, 0 : int(np.ceil(w / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, int(np.ceil(w / 2) + np.ceil(numSample / 2)) : w] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, w]))\n else:\n imageVolume = imageVolume.conj().T\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[w, h]))\n imageVolume[:, 0 : int(np.ceil(h / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, int(np.ceil(h / 2) + np.ceil(numSample / 2)) : h] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[w, h]))\n imageVolume = imageVolume.conj().T\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n cutFreq, axis = self.get_params(self.cutFreq, self.axes)\n data_dict[self.data_key][b, c] = self.__gibbsRinging__(data_dict[self.data_key][b, c], cutFreq, axis)\n return data_dict" }, { "identifier": "DownsampleSegForDS", "path": "yucca/image_processing/transforms/sampling.py", "snippet": "class DownsampleSegForDS(YuccaTransform):\n \"\"\" \"\"\"\n\n def __init__(self, deep_supervision: bool = False, label_key=\"label\", factors=(1, 0.5, 0.25, 0.125, 0.0625)):\n self.deep_supervision = deep_supervision\n self.label_key = label_key\n self.factors = factors\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __downsample__(self, label, factors):\n orig_type = label.dtype\n orig_shape = label.shape\n downsampled_labels = []\n for factor in factors:\n target_shape = np.array(orig_shape).astype(int)\n for i in range(2, len(orig_shape)):\n target_shape[i] *= factor\n if np.all(target_shape == orig_shape):\n downsampled_labels.append(label)\n else:\n canvas = np.zeros(target_shape)\n for b in range(label.shape[0]):\n for c in range(label[b].shape[0]):\n canvas[b, c] = resize(\n label[b, c].astype(float),\n target_shape[2:],\n 0,\n mode=\"edge\",\n clip=True,\n anti_aliasing=False,\n ).astype(orig_type)\n downsampled_labels.append(canvas)\n return downsampled_labels\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n if self.deep_supervision:\n data_dict[self.label_key] = 
self.__downsample__(data_dict[self.label_key], self.factors)\n return data_dict" }, { "identifier": "SimulateLowres", "path": "yucca/image_processing/transforms/SimulateLowres.py", "snippet": "class SimulateLowres(YuccaTransform):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n p_per_channel=0.5,\n p_per_axis=0.33,\n zoom_range=(0.5, 1.0),\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.p_per_channel = p_per_channel\n self.p_per_axis = p_per_axis\n self.zoom_range = zoom_range\n\n @staticmethod\n def get_params(zoom_range, shape, p_per_axis):\n # No parameters to retrieve\n if isinstance(shape, (list, tuple)):\n shape = np.array(shape)\n zoom = np.random.uniform(*zoom_range)\n dim = len(shape)\n zoomed_shape = np.round(shape * zoom).astype(int)\n for i in range(dim):\n if np.random.uniform() < p_per_axis:\n shape[i] = zoomed_shape[i]\n return shape\n\n def __simulatelowres__(self, imageVolume, target_shape):\n shape = imageVolume.shape\n downsampled = resize(\n imageVolume.astype(float),\n target_shape,\n order=0,\n mode=\"edge\",\n anti_aliasing=False,\n )\n imageVolume = resize(downsampled, shape, order=3, mode=\"edge\", anti_aliasing=False)\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_channel:\n target_shape = self.get_params(\n self.zoom_range,\n data_dict[self.data_key][b, c].shape,\n self.p_per_axis,\n )\n data_dict[self.data_key][b, c] = self.__simulatelowres__(data_dict[self.data_key][b, c], target_shape)\n return data_dict" }, { "identifier": "Spatial", "path": "yucca/image_processing/transforms/Spatial.py", "snippet": "class Spatial(YuccaTransform):\n \"\"\"\n variables in aug_params:\n do_Rotation\n Rotation_p_per_sample\n Rotation_p_per_channel\n Rotation_x_rot\n Rotation_y_rot\n Rotation_z_rot\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n label_key=\"label\",\n crop=False,\n patch_size: Tuple[int] = None,\n random_crop=True,\n p_deform_per_sample=1,\n deform_sigma=(20, 30),\n deform_alpha=(300, 600),\n p_rot_per_sample=1,\n p_rot_per_axis=1,\n x_rot_in_degrees=(0.0, 10.0),\n y_rot_in_degrees=(0.0, 10.0),\n z_rot_in_degrees=(0.0, 10.0),\n p_scale_per_sample=1,\n scale_factor=(0.85, 1.15),\n skip_label=False,\n ):\n self.data_key = data_key\n self.label_key = label_key\n self.skip_label = skip_label\n self.do_crop = crop\n self.patch_size = patch_size\n self.random_crop = random_crop\n\n self.p_deform_per_sample = p_deform_per_sample\n self.deform_sigma = deform_sigma\n self.deform_alpha = deform_alpha\n\n self.p_rot_per_sample = p_rot_per_sample\n self.p_rot_per_axis = p_rot_per_axis\n self.x_rot_in_degrees = x_rot_in_degrees\n self.y_rot_in_degrees = y_rot_in_degrees\n self.z_rot_in_degrees = z_rot_in_degrees\n\n self.p_scale_per_sample = p_scale_per_sample\n self.scale_factor = scale_factor\n\n @staticmethod\n def get_params(\n deform_alpha: Tuple[float],\n deform_sigma: Tuple[float],\n x_rot: Tuple[float],\n y_rot: Tuple[float],\n z_rot: Tuple[float],\n scale_factor: 
Tuple[float],\n ) -> Tuple[float]:\n if deform_alpha:\n deform_alpha = float(np.random.uniform(*deform_alpha))\n if deform_sigma:\n deform_sigma = float(np.random.uniform(*deform_sigma))\n\n if x_rot:\n x_rot = float(np.random.uniform(*x_rot)) * (np.pi / 180)\n if y_rot:\n y_rot = float(np.random.uniform(*y_rot)) * (np.pi / 180)\n if z_rot:\n z_rot = float(np.random.uniform(*z_rot)) * (np.pi / 180)\n\n if scale_factor:\n scale_factor = float(np.random.uniform(*scale_factor))\n\n return deform_alpha, deform_sigma, x_rot, y_rot, z_rot, scale_factor\n\n def __CropDeformRotateScale__(\n self,\n imageVolume,\n labelVolume,\n patch_size,\n alpha,\n sigma,\n x_rot,\n y_rot,\n z_rot,\n scale_factor,\n skip_label,\n ):\n if not self.do_crop:\n patch_size = imageVolume.shape[2:]\n\n coords = create_zero_centered_coordinate_matrix(patch_size)\n imageCanvas = np.zeros((imageVolume.shape[0], imageVolume.shape[1], *patch_size), dtype=np.float32)\n\n # First we apply deformation to the coordinate matrix\n if np.random.uniform() < self.p_deform_per_sample:\n coords = deform_coordinate_matrix(coords, alpha=alpha, sigma=sigma)\n\n # Then we rotate the coordinate matrix around one or more axes\n if np.random.uniform() < self.p_rot_per_sample:\n rot_matrix = np.eye(len(patch_size))\n if len(patch_size) == 2:\n rot_matrix = np.dot(rot_matrix, Rz2D(z_rot))\n else:\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Rx(x_rot))\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Ry(y_rot))\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Rz(z_rot))\n\n coords = np.dot(coords.reshape(len(patch_size), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)\n\n # And finally scale it\n # Scaling effect is \"inverted\"\n # i.e. 
a scale factor of 0.9 will zoom in\n if np.random.uniform() < self.p_scale_per_sample:\n coords *= scale_factor\n\n if self.random_crop and self.do_crop:\n for d in range(len(patch_size)):\n crop_center_idx = [\n np.random.randint(\n int(patch_size[d] / 2),\n imageVolume.shape[d + 2] - int(patch_size[d] / 2) + 1,\n )\n ]\n coords[d] += crop_center_idx\n else:\n # Reversing the zero-centering of the coordinates\n for d in range(len(patch_size)):\n coords[d] += imageVolume.shape[d + 2] / 2.0 - 0.5\n\n # Mapping the images to the distorted coordinates\n for b in range(imageVolume.shape[0]):\n for c in range(imageVolume.shape[1]):\n imageCanvas[b, c] = map_coordinates(\n imageVolume[b, c].astype(float),\n coords,\n order=3,\n mode=\"constant\",\n cval=0.0,\n ).astype(imageVolume.dtype)\n\n if not skip_label:\n labelCanvas = np.zeros(\n (labelVolume.shape[0], labelVolume.shape[1], *patch_size),\n dtype=np.float32,\n )\n\n # Mapping the labelmentations to the distorted coordinates\n for b in range(labelVolume.shape[0]):\n for c in range(labelVolume.shape[1]):\n labelCanvas[b, c] = map_coordinates(labelVolume[b, c], coords, order=0, mode=\"constant\", cval=0.0).astype(\n labelVolume.dtype\n )\n return imageCanvas, labelCanvas\n return imageCanvas, labelVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n\t\t\t\\nShould be (c, x, y, z) or (c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n (\n deform_alpha,\n deform_sigma,\n x_rot_rad,\n y_rot_rad,\n z_rot_rad,\n scale_factor,\n ) = self.get_params(\n deform_alpha=self.deform_alpha,\n deform_sigma=self.deform_sigma,\n x_rot=self.x_rot_in_degrees,\n y_rot=self.y_rot_in_degrees,\n z_rot=self.z_rot_in_degrees,\n scale_factor=self.scale_factor,\n )\n\n (\n data_dict[self.data_key],\n data_dict[self.label_key],\n ) = self.__CropDeformRotateScale__(\n data_dict[self.data_key],\n data_dict[self.label_key],\n self.patch_size,\n deform_alpha,\n deform_sigma,\n x_rot_rad,\n y_rot_rad,\n z_rot_rad,\n scale_factor,\n self.skip_label,\n )\n return data_dict" }, { "identifier": "find_optimal_tensor_dims", "path": "yucca/network_architectures/utils/model_memory_estimation.py", "snippet": "def find_optimal_tensor_dims(\n dimensionality,\n num_classes,\n modalities,\n model_name,\n max_patch_size,\n fixed_patch_size: tuple | list = None,\n fixed_batch_size: tuple | list = None,\n max_memory_usage_in_gb=None,\n):\n if max_memory_usage_in_gb is None:\n try:\n gpu_vram_in_gb = int(torch.cuda.get_device_properties(0).total_memory / 1024**2 * 0.001)\n except RuntimeError:\n gpu_vram_in_gb = 12\n # Don't wanna utilize more than 12GB, to ensure epoch times are kept relatively low\n max_memory_usage_in_gb = min(12, gpu_vram_in_gb)\n\n # Use this offset to factor the overhead from CUDA and other libraries taking a substantial amount of VRAM\n offset = 2.5\n\n OOM_OR_MAXED = False\n final_batch_size = None\n final_patch_size = None\n\n if dimensionality == \"2D\":\n if len(max_patch_size) == 3:\n max_patch_size = max_patch_size[1:]\n conv = nn.Conv2d\n dropout = nn.Dropout2d\n norm = nn.InstanceNorm2d\n batch_size = 16\n max_batch_size = 512\n patch_size = [32, 32] if not model_name == \"UNetR\" else [64, 64]\n if dimensionality == \"3D\":\n conv = nn.Conv3d\n dropout = nn.Dropout3d\n norm = nn.InstanceNorm3d\n batch_size = 2\n 
max_batch_size = 2\n patch_size = [32, 32, 32] if not model_name == \"UNetR\" else [64, 64, 64]\n\n if fixed_batch_size:\n batch_size = fixed_batch_size\n max_batch_size = fixed_batch_size\n\n absolute_max = 128**3\n\n model = recursive_find_python_class(\n folder=[join(yucca.__path__[0], \"network_architectures\")],\n class_name=model_name,\n current_module=\"yucca.network_architectures\",\n )\n model_kwargs = {\n \"input_channels\": modalities,\n \"num_classes\": num_classes,\n \"conv_op\": conv,\n \"patch_size\": patch_size,\n \"dropout_op\": dropout,\n \"norm_op\": norm,\n }\n model_kwargs = filter_kwargs(model, model_kwargs)\n model = model(**model_kwargs)\n\n est = 0\n idx = 0\n maxed_idxs = []\n if fixed_patch_size is not None:\n patch_size = fixed_patch_size\n # first fix dimensions so they are divisible by 16 (otherwise issues with standard pools and strides)\n patch_size = [math.ceil(i / 16) * 16 for i in patch_size]\n max_patch_size = patch_size\n while not OOM_OR_MAXED:\n try:\n if np.prod(patch_size) >= absolute_max:\n max_patch_size = patch_size\n\n inp = torch.zeros((batch_size, modalities, *patch_size))\n est = estimate_memory_training(model, inp)\n\n # If estimated usage is still within acceptable bounds we set the (maybe temporary) final dimensions\n if est < max_memory_usage_in_gb - offset:\n final_batch_size = batch_size\n final_patch_size = tuple(patch_size)\n else:\n OOM_OR_MAXED = True\n\n if patch_size[idx] + 16 < max_patch_size[idx]:\n patch_size[idx] += 16\n if model_name == \"UNetR\": # we need to re-instantiate it because of the ViT\n model = recursive_find_python_class(\n folder=[join(yucca.__path__[0], \"network_architectures\")],\n class_name=model_name,\n current_module=\"yucca.network_architectures\",\n )\n model = model(**model_kwargs)\n\n if idx < len(patch_size) - 1:\n idx += 1\n else:\n idx = 0\n else:\n # here we mark that one dimension has been maxed out\n if idx not in maxed_idxs:\n maxed_idxs.append(idx)\n # if not all dimensions are maxed out for the patch_size,\n # we try the next dimension\n if not len(maxed_idxs) == len(patch_size):\n if idx < len(patch_size) - 1:\n idx += 1\n else:\n idx = 0\n\n # when all dimensions of the patch are maxed\n # we try increasing the batch_size instead\n if len(maxed_idxs) == len(patch_size):\n # Unless batch_size is maxed\n if not max_batch_size > batch_size:\n final_batch_size = batch_size\n final_patch_size = tuple(patch_size)\n OOM_OR_MAXED = True\n if len(patch_size) == 3:\n batch_size += 2\n else:\n batch_size += 8\n except torch.cuda.OutOfMemoryError:\n OOM_OR_MAXED = True\n if final_batch_size is None or final_batch_size is None:\n print(\n \"\\n\"\n \"Final batch and/or patch size was not found. \\n\"\n \"This is likely caused by supplying large fixed parameters causing (or almost causing) OOM errors. \\n\"\n \"Will attempt to run with supplied parameters, but this might cause issues.\"\n )\n print(\n f\"Estimated GPU memory usage for parameters is: {est}GB and the max requested vram is: {max_memory_usage_in_gb-offset}GB. \\n\"\n f\"This includes an offset of {offset}GB to account for vram used by PyTorch and CUDA. \\n\"\n \"Consider increasing the max vram or working with a smaller batch and/or patch size.\"\n \"\\n\"\n )\n if final_batch_size is None:\n final_batch_size = batch_size\n if final_patch_size is None:\n final_patch_size = tuple(patch_size)\n return final_batch_size, final_patch_size" } ]
from torchvision import transforms from yucca.image_processing.matrix_ops import get_max_rotated_size from yucca.image_processing.transforms.formatting import ( AddBatchDimension, RemoveBatchDimension, ) from yucca.image_processing.transforms.BiasField import BiasField from yucca.image_processing.transforms.Blur import Blur from yucca.image_processing.transforms.CopyImageToSeg import CopyImageToSeg from yucca.image_processing.transforms.Gamma import Gamma from yucca.image_processing.transforms.Ghosting import MotionGhosting from yucca.image_processing.transforms.Masking import Masking from yucca.image_processing.transforms.Mirror import Mirror from yucca.image_processing.transforms.Noise import ( AdditiveNoise, MultiplicativeNoise, ) from yucca.image_processing.transforms.Ringing import GibbsRinging from yucca.image_processing.transforms.sampling import DownsampleSegForDS from yucca.image_processing.transforms.SimulateLowres import SimulateLowres from yucca.image_processing.transforms.Spatial import Spatial from yucca.network_architectures.utils.model_memory_estimation import ( find_optimal_tensor_dims, )
13389
self.gamma_range = (0.5, 2.0) self.gibbs_ringing_p_per_sample = 0.2 self.gibbs_ringing_cutfreq = (96, 129) self.gibbs_ringing_axes = (0, 2) if is_2d else (0, 3) self.mirror_p_per_sample = 0.0 self.mirror_p_per_axis = 0.33 self.mirror_axes = (0, 1) if is_2d else (0, 1, 2) self.motion_ghosting_p_per_sample = 0.2 self.motion_ghosting_alpha = (0.85, 0.95) self.motion_ghosting_numreps = (2, 11) self.motion_ghosting_axes = (0, 2) if is_2d else (0, 3) self.multiplicative_noise_p_per_sample = 0.2 self.multiplicative_noise_mean = (0, 0) self.multiplicative_noise_sigma = (1e-3, 1e-4) self.rotation_p_per_sample = 0.2 self.rotation_p_per_axis = 0.66 self.rotation_x = (-30.0, 30.0) self.rotation_y = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.rotation_z = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.scale_p_per_sample = 0.2 self.scale_factor = (0.9, 1.1) self.simulate_lowres_p_per_sample = 0.2 self.simulate_lowres_p_per_channel = 0.5 self.simulate_lowres_p_per_axis = 0.33 self.simulate_lowres_zoom_range = (0.5, 1.0) @property def pre_aug_patch_size(self): # First check if any spatial transforms are included if self.elastic_deform_p_per_sample > 0 or self.rotation_p_per_sample > 0 or self.scale_p_per_sample > 0: self._pre_aug_patch_size = get_max_rotated_size(self.patch_size) return self._pre_aug_patch_size def apply_task_type_specific_preset(self, task_type_preset): if task_type_preset == "classification": self.skip_label = True if task_type_preset == "unsupervised": self.skip_label = True self.copy_image_to_label = True # This should be uncommented when masking is properly implemented # augmentation_parameter_dict["mask_image_for_reconstruction"] = True def overwrite_params(self, parameter_dict): for key, value in parameter_dict.items(): setattr(self, key, value) def compose_train_transforms(self): tr_transforms = transforms.Compose( [ AddBatchDimension(), Spatial( patch_size=self.patch_size, crop=True, random_crop=self.random_crop, p_deform_per_sample=self.elastic_deform_p_per_sample, deform_sigma=self.elastic_deform_sigma, deform_alpha=self.elastic_deform_alpha, p_rot_per_sample=self.rotation_p_per_sample, p_rot_per_axis=self.rotation_p_per_axis, x_rot_in_degrees=self.rotation_x, y_rot_in_degrees=self.rotation_y, z_rot_in_degrees=self.rotation_z, p_scale_per_sample=self.scale_p_per_sample, scale_factor=self.scale_factor, skip_label=self.skip_label, ), AdditiveNoise( p_per_sample=self.additive_noise_p_per_sample, mean=self.additive_noise_mean, sigma=self.additive_noise_sigma, ), Blur( p_per_sample=self.blurring_p_per_sample, p_per_channel=self.blurring_p_per_channel, sigma=self.blurring_sigma, ), MultiplicativeNoise( p_per_sample=self.multiplicative_noise_p_per_sample, mean=self.multiplicative_noise_mean, sigma=self.multiplicative_noise_sigma, ), MotionGhosting( p_per_sample=self.motion_ghosting_p_per_sample, alpha=self.motion_ghosting_alpha, numReps=self.motion_ghosting_numreps, axes=self.motion_ghosting_axes, ), GibbsRinging( p_per_sample=self.gibbs_ringing_p_per_sample, cutFreq=self.gibbs_ringing_cutfreq, axes=self.gibbs_ringing_axes, ), SimulateLowres( p_per_sample=self.simulate_lowres_p_per_sample, p_per_channel=self.simulate_lowres_p_per_channel, p_per_axis=self.simulate_lowres_p_per_axis, zoom_range=self.simulate_lowres_zoom_range, ), BiasField(p_per_sample=self.biasfield_p_per_sample), Gamma( p_per_sample=self.gamma_p_per_sample, p_invert_image=self.gamma_p_invert_image, gamma_range=self.gamma_range, ), Mirror( p_per_sample=self.mirror_p_per_sample, axes=self.mirror_axes, 
p_mirror_per_axis=self.mirror_p_per_axis, skip_label=self.skip_label, ), DownsampleSegForDS(deep_supervision=self.deep_supervision), CopyImageToSeg(copy=self.copy_image_to_label),
class YuccaAugmentationComposer: def __init__( self, patch_size: list | tuple, deep_supervision: bool = False, is_2D: bool = False, parameter_dict: dict = {}, task_type_preset: str = None, ): self._pre_aug_patch_size = None self.deep_supervision = deep_supervision self.setup_default_params(is_2D, patch_size) self.apply_task_type_specific_preset(task_type_preset) self.overwrite_params(parameter_dict) self.train_transforms = self.compose_train_transforms() self.val_transforms = self.compose_val_transforms() def setup_default_params(self, is_2d, patch_size): print("Composing Transforms") # Define whether we crop before or after applying augmentations # Define if cropping is random or always centered self.random_crop = True self.mask_image_for_reconstruction = False self.patch_size = patch_size # label/segmentation transforms self.skip_label = False self.label_dtype = int self.copy_image_to_label = False self.additive_noise_p_per_sample = 0.2 self.additive_noise_mean = (0.0, 0.0) self.additive_noise_sigma = (1e-3, 1e-4) self.biasfield_p_per_sample = 0.33 self.blurring_p_per_sample = 0.2 self.blurring_sigma = (0.0, 1.0) self.blurring_p_per_channel = 0.5 self.elastic_deform_p_per_sample = 0.33 self.elastic_deform_alpha = (200, 600) self.elastic_deform_sigma = (20, 30) self.gamma_p_per_sample = 0.2 self.gamma_p_invert_image = 0.05 self.gamma_range = (0.5, 2.0) self.gibbs_ringing_p_per_sample = 0.2 self.gibbs_ringing_cutfreq = (96, 129) self.gibbs_ringing_axes = (0, 2) if is_2d else (0, 3) self.mirror_p_per_sample = 0.0 self.mirror_p_per_axis = 0.33 self.mirror_axes = (0, 1) if is_2d else (0, 1, 2) self.motion_ghosting_p_per_sample = 0.2 self.motion_ghosting_alpha = (0.85, 0.95) self.motion_ghosting_numreps = (2, 11) self.motion_ghosting_axes = (0, 2) if is_2d else (0, 3) self.multiplicative_noise_p_per_sample = 0.2 self.multiplicative_noise_mean = (0, 0) self.multiplicative_noise_sigma = (1e-3, 1e-4) self.rotation_p_per_sample = 0.2 self.rotation_p_per_axis = 0.66 self.rotation_x = (-30.0, 30.0) self.rotation_y = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.rotation_z = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.scale_p_per_sample = 0.2 self.scale_factor = (0.9, 1.1) self.simulate_lowres_p_per_sample = 0.2 self.simulate_lowres_p_per_channel = 0.5 self.simulate_lowres_p_per_axis = 0.33 self.simulate_lowres_zoom_range = (0.5, 1.0) @property def pre_aug_patch_size(self): # First check if any spatial transforms are included if self.elastic_deform_p_per_sample > 0 or self.rotation_p_per_sample > 0 or self.scale_p_per_sample > 0: self._pre_aug_patch_size = get_max_rotated_size(self.patch_size) return self._pre_aug_patch_size def apply_task_type_specific_preset(self, task_type_preset): if task_type_preset == "classification": self.skip_label = True if task_type_preset == "unsupervised": self.skip_label = True self.copy_image_to_label = True # This should be uncommented when masking is properly implemented # augmentation_parameter_dict["mask_image_for_reconstruction"] = True def overwrite_params(self, parameter_dict): for key, value in parameter_dict.items(): setattr(self, key, value) def compose_train_transforms(self): tr_transforms = transforms.Compose( [ AddBatchDimension(), Spatial( patch_size=self.patch_size, crop=True, random_crop=self.random_crop, p_deform_per_sample=self.elastic_deform_p_per_sample, deform_sigma=self.elastic_deform_sigma, deform_alpha=self.elastic_deform_alpha, p_rot_per_sample=self.rotation_p_per_sample, p_rot_per_axis=self.rotation_p_per_axis, 
x_rot_in_degrees=self.rotation_x, y_rot_in_degrees=self.rotation_y, z_rot_in_degrees=self.rotation_z, p_scale_per_sample=self.scale_p_per_sample, scale_factor=self.scale_factor, skip_label=self.skip_label, ), AdditiveNoise( p_per_sample=self.additive_noise_p_per_sample, mean=self.additive_noise_mean, sigma=self.additive_noise_sigma, ), Blur( p_per_sample=self.blurring_p_per_sample, p_per_channel=self.blurring_p_per_channel, sigma=self.blurring_sigma, ), MultiplicativeNoise( p_per_sample=self.multiplicative_noise_p_per_sample, mean=self.multiplicative_noise_mean, sigma=self.multiplicative_noise_sigma, ), MotionGhosting( p_per_sample=self.motion_ghosting_p_per_sample, alpha=self.motion_ghosting_alpha, numReps=self.motion_ghosting_numreps, axes=self.motion_ghosting_axes, ), GibbsRinging( p_per_sample=self.gibbs_ringing_p_per_sample, cutFreq=self.gibbs_ringing_cutfreq, axes=self.gibbs_ringing_axes, ), SimulateLowres( p_per_sample=self.simulate_lowres_p_per_sample, p_per_channel=self.simulate_lowres_p_per_channel, p_per_axis=self.simulate_lowres_p_per_axis, zoom_range=self.simulate_lowres_zoom_range, ), BiasField(p_per_sample=self.biasfield_p_per_sample), Gamma( p_per_sample=self.gamma_p_per_sample, p_invert_image=self.gamma_p_invert_image, gamma_range=self.gamma_range, ), Mirror( p_per_sample=self.mirror_p_per_sample, axes=self.mirror_axes, p_mirror_per_axis=self.mirror_p_per_axis, skip_label=self.skip_label, ), DownsampleSegForDS(deep_supervision=self.deep_supervision), CopyImageToSeg(copy=self.copy_image_to_label),
Masking(mask=self.mask_image_for_reconstruction),
8
2023-10-26 08:13:03+00:00
16k
Elfenreigen/UniChest
optim/optim_factory.py
[ { "identifier": "Adafactor", "path": "optim/adafactor.py", "snippet": "class Adafactor(torch.optim.Optimizer):\n \"\"\"Implements Adafactor algorithm.\n This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n Note that this optimizer internally adjusts the learning rate depending on the\n *scale_parameter*, *relative_step* and *warmup_init* options.\n\n To use a manual (external) learning rate schedule you should set `scale_parameter=False` and\n `relative_step=False`.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): external learning rate (default: None)\n eps (tuple[float, float]): regularization constants for square gradient\n and parameter scale respectively (default: (1e-30, 1e-3))\n clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)\n decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)\n beta1 (float): coefficient used for computing running averages of gradient (default: None)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)\n relative_step (bool): if True, time-dependent learning rate is computed\n instead of external learning rate (default: True)\n warmup_init (bool): time-dependent learning rate computation depends on\n whether warm-up initialization is being used (default: False)\n \"\"\"\n\n def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,\n decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):\n relative_step = lr is None\n if warmup_init and not relative_step:\n raise ValueError('warmup_init requires relative_step=True')\n\n beta1 = None if betas is None else betas[0] # make it compat with standard betas arg\n defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,\n beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,\n relative_step=relative_step, warmup_init=warmup_init)\n super(Adafactor, self).__init__(params, defaults)\n\n @staticmethod\n def _get_lr(param_group, param_state):\n if param_group['relative_step']:\n min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2\n lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))\n param_scale = 1.0\n if param_group['scale_parameter']:\n param_scale = max(param_group['eps_scale'], param_state['RMS'])\n param_group['lr'] = lr_t * param_scale\n return param_group['lr']\n\n @staticmethod\n def _get_options(param_group, param_shape):\n factored = len(param_shape) >= 2\n use_first_moment = param_group['beta1'] is not None\n return factored, use_first_moment\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):\n r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in 
group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n # State Initialization\n if len(state) == 0:\n state['step'] = 0\n\n if use_first_moment:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].to(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)\n else:\n state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n lr_t = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = grad ** 2 + group['eps']\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n\n exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))\n exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))\n #exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) # pytorch 1.6+\n #exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)\n\n # Approximation of exponential moving average of square of gradient\n update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n\n exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)\n #exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) # pytorch 1.6+\n update = exp_avg_sq.rsqrt().mul_(grad)\n\n update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))\n update.mul_(lr_t)\n\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group[\"beta1\"]).add_(1 - group[\"beta1\"], update)\n #exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) # pytorch 1.6+\n update = exp_avg\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group[\"weight_decay\"] * lr_t, p_data_fp32)\n #p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t) # pytorch 1.6+\n\n p_data_fp32.add_(-update)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "Adahessian", "path": "optim/adahessian.py", "snippet": "class Adahessian(torch.optim.Optimizer):\n \"\"\"\n Implements the AdaHessian algorithm from \"ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning\"\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): learning rate (default: 0.1)\n betas ((float, float), optional): coefficients used for computing running averages of gradient and the\n squared hessian trace (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)\n hessian_power (float, 
optional): exponent of the hessian trace (default: 1.0)\n update_each (int, optional): compute the hessian trace approximation only after *this* number of steps\n (to save time) (default: 1)\n n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)\n \"\"\"\n\n def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,\n hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 0: {betas[0]}\")\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 1: {betas[1]}\")\n if not 0.0 <= hessian_power <= 1.0:\n raise ValueError(f\"Invalid Hessian power value: {hessian_power}\")\n\n self.n_samples = n_samples\n self.update_each = update_each\n self.avg_conv_kernel = avg_conv_kernel\n\n # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training\n self.seed = 2147483647\n self.generator = torch.Generator().manual_seed(self.seed)\n\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)\n super(Adahessian, self).__init__(params, defaults)\n\n for p in self.get_params():\n p.hess = 0.0\n self.state[p][\"hessian step\"] = 0\n\n @property\n def is_second_order(self):\n return True\n\n def get_params(self):\n \"\"\"\n Gets all parameters in all param_groups with gradients\n \"\"\"\n\n return (p for group in self.param_groups for p in group['params'] if p.requires_grad)\n\n def zero_hessian(self):\n \"\"\"\n Zeros out the accumalated hessian traces.\n \"\"\"\n\n for p in self.get_params():\n if not isinstance(p.hess, float) and self.state[p][\"hessian step\"] % self.update_each == 0:\n p.hess.zero_()\n\n @torch.no_grad()\n def set_hessian(self):\n \"\"\"\n Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.\n \"\"\"\n\n params = []\n for p in filter(lambda p: p.grad is not None, self.get_params()):\n if self.state[p][\"hessian step\"] % self.update_each == 0: # compute the trace only each `update_each` step\n params.append(p)\n self.state[p][\"hessian step\"] += 1\n\n if len(params) == 0:\n return\n\n if self.generator.device != params[0].device: # hackish way of casting the generator to the right device\n self.generator = torch.Generator(params[0].device).manual_seed(self.seed)\n\n grads = [p.grad for p in params]\n\n for i in range(self.n_samples):\n # Rademacher distribution {-1.0, 1.0}\n zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]\n h_zs = torch.autograd.grad(\n grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)\n for h_z, z, p in zip(h_zs, zs, params):\n p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"\n Performs a single optimization step.\n Arguments:\n closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)\n \"\"\"\n\n loss = None\n if closure is not None:\n loss = closure()\n\n self.zero_hessian()\n self.set_hessian()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None or p.hess is None:\n continue\n\n if 
self.avg_conv_kernel and p.dim() == 4:\n p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()\n\n # Perform correct stepweight decay as in AdamW\n p.mul_(1 - group['lr'] * group['weight_decay'])\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 1:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p)\n # Exponential moving average of Hessian diagonal square values\n state['exp_hessian_diag_sq'] = torch.zeros_like(p)\n\n exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']\n beta1, beta2 = group['betas']\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)\n exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n k = group['hessian_power']\n denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])\n\n # make update\n step_size = group['lr'] / bias_correction1\n p.addcdiv_(exp_avg, denom, value=-step_size)\n\n return loss" }, { "identifier": "AdamP", "path": "optim/adamp.py", "snippet": "class AdamP(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)\n super(AdamP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n grad = p.grad.data\n beta1, beta2 = group['betas']\n nesterov = group['nesterov']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p.data)\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n # Adam\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n step_size = group['lr'] / bias_correction1\n\n if nesterov:\n perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom\n else:\n perturb = exp_avg / denom\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n perturb, wd_ratio = 
self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if group['weight_decay'] > 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)\n\n # Step\n p.data.add_(-step_size, perturb)\n\n return loss" }, { "identifier": "Lookahead", "path": "optim/lookahead.py", "snippet": "class Lookahead(Optimizer):\n def __init__(self, base_optimizer, alpha=0.5, k=6):\n if not 0.0 <= alpha <= 1.0:\n raise ValueError(f'Invalid slow update rate: {alpha}')\n if not 1 <= k:\n raise ValueError(f'Invalid lookahead steps: {k}')\n defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)\n self.base_optimizer = base_optimizer\n self.param_groups = self.base_optimizer.param_groups\n self.defaults = base_optimizer.defaults\n self.defaults.update(defaults)\n self.state = defaultdict(dict)\n # manually add our defaults to the param groups\n for name, default in defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)\n\n def update_slow(self, group):\n for fast_p in group[\"params\"]:\n if fast_p.grad is None:\n continue\n param_state = self.state[fast_p]\n if 'slow_buffer' not in param_state:\n param_state['slow_buffer'] = torch.empty_like(fast_p.data)\n param_state['slow_buffer'].copy_(fast_p.data)\n slow = param_state['slow_buffer']\n slow.add_(group['lookahead_alpha'], fast_p.data - slow)\n fast_p.data.copy_(slow)\n\n def sync_lookahead(self):\n for group in self.param_groups:\n self.update_slow(group)\n\n def step(self, closure=None):\n #assert id(self.param_groups) == id(self.base_optimizer.param_groups)\n loss = self.base_optimizer.step(closure)\n for group in self.param_groups:\n group['lookahead_step'] += 1\n if group['lookahead_step'] % group['lookahead_k'] == 0:\n self.update_slow(group)\n return loss\n\n def state_dict(self):\n fast_state_dict = self.base_optimizer.state_dict()\n slow_state = {\n (id(k) if isinstance(k, torch.Tensor) else k): v\n for k, v in self.state.items()\n }\n fast_state = fast_state_dict['state']\n param_groups = fast_state_dict['param_groups']\n return {\n 'state': fast_state,\n 'slow_state': slow_state,\n 'param_groups': param_groups,\n }\n\n def load_state_dict(self, state_dict):\n fast_state_dict = {\n 'state': state_dict['state'],\n 'param_groups': state_dict['param_groups'],\n }\n self.base_optimizer.load_state_dict(fast_state_dict)\n\n # We want to restore the slow state, but share param_groups reference\n # with base_optimizer. 
This is a bit redundant but least code\n slow_state_new = False\n if 'slow_state' not in state_dict:\n print('Loading state_dict from optimizer without Lookahead applied.')\n state_dict['slow_state'] = defaultdict(dict)\n slow_state_new = True\n slow_state_dict = {\n 'state': state_dict['slow_state'],\n 'param_groups': state_dict['param_groups'], # this is pointless but saves code\n }\n super(Lookahead, self).load_state_dict(slow_state_dict)\n self.param_groups = self.base_optimizer.param_groups # make both ref same container\n if slow_state_new:\n # reapply defaults to catch missing lookahead specific ones\n for name, default in self.defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)" }, { "identifier": "Nadam", "path": "optim/nadam.py", "snippet": "class Nadam(Optimizer):\n \"\"\"Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).\n\n It has been proposed in `Incorporating Nesterov Momentum into Adam`__.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 2e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n schedule_decay (float, optional): momentum schedule decay (default: 4e-3)\n\n __ http://cs229.stanford.edu/proj2015/054_report.pdf\n __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf\n\n Originally taken from: https://github.com/pytorch/pytorch/pull/1408\n NOTE: Has potential issues but does work well on some problems.\n \"\"\"\n\n def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, schedule_decay=4e-3):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, schedule_decay=schedule_decay)\n super(Nadam, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['m_schedule'] = 1.\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n\n # Warming momentum schedule\n m_schedule = state['m_schedule']\n schedule_decay = group['schedule_decay']\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n eps = group['eps']\n state['step'] += 1\n t = state['step']\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n momentum_cache_t = beta1 * \\\n (1. - 0.5 * (0.96 ** (t * schedule_decay)))\n momentum_cache_t_1 = beta1 * \\\n (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))\n m_schedule_new = m_schedule * momentum_cache_t\n m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1\n state['m_schedule'] = m_schedule_new\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1. - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1. - beta2, grad, grad)\n exp_avg_sq_prime = exp_avg_sq / (1. 
- beta2 ** t)\n denom = exp_avg_sq_prime.sqrt_().add_(eps)\n\n p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new), grad, denom)\n p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next), exp_avg, denom)\n\n return loss" }, { "identifier": "NovoGrad", "path": "optim/novograd.py", "snippet": "class NovoGrad(Optimizer):\n def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super(NovoGrad, self).__init__(params, defaults)\n self._lr = lr\n self._beta1 = betas[0]\n self._beta2 = betas[1]\n self._eps = eps\n self._wd = weight_decay\n self._grad_averaging = grad_averaging\n\n self._momentum_initialized = False\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n if not self._momentum_initialized:\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('NovoGrad does not support sparse gradients')\n\n v = torch.norm(grad)**2\n m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data\n state['step'] = 0\n state['v'] = v\n state['m'] = m\n state['grad_ema'] = None\n self._momentum_initialized = True\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n state['step'] += 1\n\n step, v, m = state['step'], state['v'], state['m']\n grad_ema = state['grad_ema']\n\n grad = p.grad.data\n g2 = torch.norm(grad)**2\n grad_ema = g2 if grad_ema is None else grad_ema * \\\n self._beta2 + g2 * (1. - self._beta2)\n grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)\n\n if self._grad_averaging:\n grad *= (1. - self._beta1)\n\n g2 = torch.norm(grad)**2\n v = self._beta2*v + (1. 
- self._beta2)*g2\n m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)\n bias_correction1 = 1 - self._beta1 ** step\n bias_correction2 = 1 - self._beta2 ** step\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n state['v'], state['m'] = v, m\n state['grad_ema'] = grad_ema\n p.data.add_(-step_size, m)\n return loss" }, { "identifier": "NvNovoGrad", "path": "optim/nvnovograd.py", "snippet": "class NvNovoGrad(Optimizer):\n \"\"\"\n Implements Novograd algorithm.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.95, 0.98))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n grad_averaging: gradient averaging\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,\n weight_decay=0, grad_averaging=False, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay,\n grad_averaging=grad_averaging,\n amsgrad=amsgrad)\n\n super(NvNovoGrad, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(NvNovoGrad, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported.')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n norm = torch.sum(torch.pow(grad, 2))\n\n if exp_avg_sq == 0:\n exp_avg_sq.copy_(norm)\n else:\n exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n grad.div_(denom)\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n if group['grad_averaging']:\n grad.mul_(1 - beta1)\n exp_avg.mul_(beta1).add_(grad)\n\n p.data.add_(-group['lr'], exp_avg)\n\n return loss" }, { "identifier": "RAdam", "path": "optim/radam.py", "snippet": "class RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "RMSpropTF", "path": "optim/rmsprop_tf.py", "snippet": "class RMSpropTF(Optimizer):\n \"\"\"Implements RMSprop algorithm (TensorFlow style epsilon)\n\n NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt\n and a few other modifications to closer match Tensorflow for matching hyper-params.\n\n Noteworthy changes include:\n 1. Epsilon applied inside square-root\n 2. square_avg initialized to ones\n 3. LR scaling of update accumulated in momentum buffer\n\n Proposed by G. 
Hinton in his\n `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.\n\n The centered version first appears in `Generating Sequences\n With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n momentum (float, optional): momentum factor (default: 0)\n alpha (float, optional): smoothing (decay) constant (default: 0.9)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-10)\n centered (bool, optional) : if ``True``, compute the centered RMSProp,\n the gradient is normalized by an estimation of its variance\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101\n lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer\n update as per defaults in Tensorflow\n\n \"\"\"\n\n def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,\n decoupled_decay=False, lr_in_momentum=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= momentum:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if not 0.0 <= weight_decay:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if not 0.0 <= alpha:\n raise ValueError(\"Invalid alpha value: {}\".format(alpha))\n\n defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,\n decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)\n super(RMSpropTF, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RMSpropTF, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('momentum', 0)\n group.setdefault('centered', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('RMSprop does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero\n if group['momentum'] > 0:\n state['momentum_buffer'] = torch.zeros_like(p.data)\n if group['centered']:\n state['grad_avg'] = torch.zeros_like(p.data)\n\n square_avg = state['square_avg']\n one_minus_alpha = 1. 
- group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n if 'decoupled_decay' in group and group['decoupled_decay']:\n p.data.add_(-group['weight_decay'], p.data)\n else:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Tensorflow order of ops for updating squared avg\n square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)\n # square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original\n\n if group['centered']:\n grad_avg = state['grad_avg']\n grad_avg.add_(one_minus_alpha, grad - grad_avg)\n # grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original\n avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt\n else:\n avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt\n\n if group['momentum'] > 0:\n buf = state['momentum_buffer']\n # Tensorflow accumulates the LR scaling in the momentum buffer\n if 'lr_in_momentum' in group and group['lr_in_momentum']:\n buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)\n p.data.add_(-buf)\n else:\n # PyTorch scales the param update by LR\n buf.mul_(group['momentum']).addcdiv_(grad, avg)\n p.data.add_(-group['lr'], buf)\n else:\n p.data.addcdiv_(-group['lr'], grad, avg)\n\n return loss" }, { "identifier": "SGDP", "path": "optim/sgdp.py", "snippet": "class SGDP(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,\n nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)\n super(SGDP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['momentum'] = torch.zeros_like(p.data)\n\n # SGD\n buf = state['momentum']\n buf.mul_(momentum).add_(1 - dampening, grad)\n if nesterov:\n d_p = grad + momentum * buf\n else:\n d_p = buf\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if weight_decay != 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))\n\n # Step\n p.data.add_(-group['lr'], d_p)\n\n return loss" } ]
import torch from torch import optim as optim from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .lookahead import Lookahead from .nadam import Nadam from .novograd import NovoGrad from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
12,364
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam':
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
7
2023-10-30 00:24:16+00:00
16k
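The record that closes here pairs a truncated optimizer-factory file, cut off right after "elif opt_lower == 'radam':", with the line that follows it in the original source, "optimizer = RAdam(parameters, **opt_args)", an integer index, and a list of retrieved snippets keyed by "identifier", "path", and "snippet". The sketch below is a minimal illustration of how such a record might be consumed, not part of any published tooling: it assumes the integer stored alongside the target line indexes into the snippet list, treats the literal \n sequences in the stored snippet text as escaped newlines, and checks that the indexed snippet's identifier actually appears in the target line. The helper names and the one-entry example record are invented for the demo.

# Minimal sketch under the assumptions stated above; the shortened RAdam
# snippet and the index value 0 are stand-ins, not the real record contents.
def recover_source(snippet_text):
    # The dump shows snippet text with literal "\n" sequences; turn them back
    # into real newlines so the snippet reads as ordinary source code.
    return snippet_text.replace("\\n", "\n")

def check_gold_snippet(context, gold_index, target_line):
    entry = context[gold_index]
    source = recover_source(entry["snippet"])
    # The indexed entry is expected to define the symbol used on the target line.
    return entry["identifier"] in target_line, len(source.splitlines())

context = [{"identifier": "RAdam",
            "path": "optim/radam.py",
            "snippet": "class RAdam(Optimizer):\\n    def __init__(self, params, lr=1e-3):\\n        ..."}]
ok, n_lines = check_gold_snippet(context, 0, "optimizer = RAdam(parameters, **opt_args)")
print(ok, n_lines)  # True 3

Run as written, this prints True plus the line count of the unescaped snippet; swapping in a real record changes only the inputs, not the logic.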
YichenZW/Coh-MGT-Detection
run_detector.py
[ { "identifier": "glue_compute_metrics", "path": "util.py", "snippet": "def glue_compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\" or task_name == \"deepfake\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"hans\":\n return {\"acc\": simple_accuracy(preds, labels)}\n else:\n raise KeyError(task_name)" }, { "identifier": "glue_convert_examples_to_features", "path": "util.py", "snippet": "def glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n \"\"\"\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for ex_index, example in enumerate(examples):\n len_examples = 0\n\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(\n example.text_a,\n add_special_tokens=True,\n max_length=max_length,\n return_token_type_ids=True,\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # Tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = (\n [0 if mask_padding_with_zero else 1] * padding_length\n ) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + (\n [0 if mask_padding_with_zero else 1] * padding_length\n )\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(input_ids), max_length\n )\n assert (\n len(attention_mask) == max_length\n ), \"Error with input length {} vs {}\".format(len(attention_mask), max_length)\n assert (\n len(token_type_ids) == max_length\n ), \"Error with input length {} vs {}\".format(len(token_type_ids), max_length)\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask])\n )\n logger.info(\n \"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids])\n )\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n nodes_index=example.nodes_index,\n adj_metric=example.adj_metric,\n sen2node=example.sen2node,\n nodes_ent=example.nodes_ent,\n )\n )\n\n return features" }, { "identifier": "glue_output_modes", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef 
pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "glue_processors", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "RobertaForGraphBasedSequenceClassification", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification(\n BertPreTrainedModel\n): \n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n self.classifier = RobertaClassificationHead(config, graph_node_size=None)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size, self.node_size, self.max_sentence_size\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n \n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(whole_rep, dim=-1)\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n 
outputs = (loss,) + outputs\n\n return outputs, whole_rep " }, { "identifier": "RobertaForGraphBasedSequenceClassification_CL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_CL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_CL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], dim=-1))\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_labelwise_winslide(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n outputs = (loss,) + outputs\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_labelwise_winslide(\n self, batch_size, batch_idx_by_label, hidden_feats\n ):\n \"\"\"\n Hidden feats must be normalized\n\n \"\"\"\n hidden_feats = F.normalize(hidden_feats, dim=1)\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n 
.scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" }, { "identifier": "RobertaForGraphBasedSequenceClassification_MBCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_MBCL(BertPreTrainedModel):\n def __init__(self, config, mb_dataloader, train_idx_by_label):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_MBCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n self.train_idx_by_label = train_idx_by_label\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.model_q = EncoderForMBCL(config)\n self.model_k = EncoderForMBCL(config)\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data.copy_(param_q.data) \n self.model_q.cuda()\n self.model_k.cuda()\n with torch.no_grad():\n for k, item in enumerate(mb_dataloader):\n input_ids = item[0].cuda()\n attention_mask = item[1].cuda()\n labels = item[3].cuda()\n nodes_index_mask = item[4].cuda()\n adj_metric = item[5].cuda()\n node_mask = item[6].cuda()\n sen2node = item[7].cuda()\n sentence_mask = item[8].cuda()\n sentence_length = item[9].cuda()\n\n output = self.model_q(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n nodes_index_mask=nodes_index_mask,\n adj_metric=adj_metric,\n node_mask=node_mask,\n sen2node=sen2node,\n sentence_mask=sentence_mask,\n sentence_length=sentence_length,\n )\n init_feat = F.normalize(output[1], dim=1)\n if k == 0:\n self.queue = init_feat\n else:\n self.queue = torch.vstack((self.queue, init_feat))\n\n print(self.queue.size())\n print(\"***queue already builded***\")\n\n self.config = self.model_q.config\n self.feat_dim = self.config.hidden_size\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n if self.training:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) 
+ output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n output_k = self.model_k(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n )\n k_feat = output_k[1]\n self.dequeue_and_enqueue(k_feat, batch_id)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n self.momentum_update(m=0.999)\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n else:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) + output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n change_dic = {0: 1, 1: 0}\n loss = 0\n\n for i in batch_idx_by_label:\n q = hidden_feats[batch_idx_by_label[i]]\n pos_bank = self.queue[self.train_idx_by_label[i]]\n pos_pair = torch.mm(q, pos_bank.transpose(0, 1))\n bottom_k = torch.topk(pos_pair, k=100, dim=1, largest=False).values\n neg_bank = self.queue[self.train_idx_by_label[change_dic[i]]]\n neg_pair = torch.mm(q, neg_bank.transpose(0, 1))\n top_k = torch.topk(neg_pair, k=100, dim=1).values\n numerator = torch.sum(torch.exp(bottom_k / self.temperature), dim=1)\n denominator = (\n torch.sum(torch.exp(top_k / self.temperature), dim=1) + numerator\n )\n\n for nid in range(len(numerator)):\n if numerator[nid] == 0:\n numerator[nid] += 1e-6\n for did in range(len(denominator)):\n if denominator[did] == 0:\n denominator[did] += 1e-6\n loss += torch.sum(-1.0 * torch.log(numerator / denominator))\n\n return loss / batch_size\n\n @torch.no_grad()\n def momentum_update(self, m=0.999):\n \"\"\"\n encoder_k = m * encoder_k + (1 - m) encoder_q\n \"\"\"\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data = param_k.data * m + param_q.data * (1.0 - m)\n\n def dequeue_and_enqueue(self, hidden_batch_feats, selected_batch_idx):\n \"\"\"\n Update memory bank by batch window slide; hidden_batch_feats must be normalized\n \"\"\"\n assert hidden_batch_feats.size()[1] == self.queue.size()[1]\n\n self.queue[selected_batch_idx] = F.normalize(hidden_batch_feats, dim=1)" }, { "identifier": "EncoderForMBCL", "path": "modeling_roberta.py", "snippet": "class 
EncoderForMBCL(BertPreTrainedModel):\n def __init__(self, config):\n super(EncoderForMBCL, self).__init__(config)\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :] \n hidden_states = outputs[2][0] \n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1) \n\n return outputs[2:], whole_rep" }, { "identifier": "RobertaForGraphBasedSequenceClassification_RFCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_RFCL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_RFCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], 
dim=-1))\n\n outputs = (logits,) + outputs[2:]\n\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n loss = 0\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n one_for_neg = one_diff_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" } ]
import os import torch import argparse import logging import random import wandb import numpy as np import ray from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.optim import AdamW from transformers import ( set_seed, AutoTokenizer, AutoConfig, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup, ) from functools import partial from util import glue_compute_metrics as compute_metrics from util import ( glue_convert_examples_to_features as convert_examples_to_features, ) from util import glue_output_modes as output_modes from util import glue_processors as processors from modeling_roberta import ( RobertaForGraphBasedSequenceClassification, RobertaForGraphBasedSequenceClassification_CL, RobertaForGraphBasedSequenceClassification_MBCL, EncoderForMBCL, RobertaForGraphBasedSequenceClassification_RFCL, ) from ray import tune from ray.tune import CLIReporter from ray.tune.schedulers import ASHAScheduler from apex import amp
11,050
) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier()
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Based on code from the above authors, modifications made by Xi'an Jiaotong University. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def number_h(num): for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1000.0: return "%3.1f%s" % (num, unit) num /= 1000.0 return "%.1f%s" % (num, "Yi") def generate_shaped_nodes_mask(nodes, max_seq_length, max_nodes_num): nodes_mask = np.zeros(shape=(max_nodes_num, max_seq_length)) nodes_num = min(len(nodes), max_nodes_num) for i in range(nodes_num): span = nodes[i] if span[0] != -1: if span[0] < max_seq_length - 1: end_pos = ( span[1] if span[1] < max_seq_length - 1 else max_seq_length - 1 ) nodes_mask[i, span[0] + 1 : end_pos + 1] = 1 else: continue return nodes_mask, nodes_num def generate_shaped_edge_mask(adj_metric, nodes_num, max_nodes_num, relation_n): if nodes_num != 0: if relation_n != 0: new_adj_metric = np.zeros(shape=(relation_n, max_nodes_num, max_nodes_num)) for i in range(relation_n): new_adj_metric[i][:nodes_num, :nodes_num] = adj_metric[i][ :nodes_num, :nodes_num ] else: new_adj_metric = np.zeros(shape=(max_nodes_num, max_nodes_num)) new_adj_metric[:nodes_num, :nodes_num] = adj_metric[:nodes_num, :nodes_num] return new_adj_metric def train(args, train_dataset, model, tokenizer): """Train the model""" total_params = sum(p.numel() for p in model.parameters()) total_trainable_params = sum( p.numel() for p in model.parameters() if p.requires_grad ) print("Total Params:", number_h(total_params)) print("Total Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # 
Check if saved optimizer or scheduler states exist if os.path.isfile( os.path.join(args.model_name_or_path, "optimizer.pt") ) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")): optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")) ) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")) ) if args.fp16: try: except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize( model, optimizer, opt_level=args.fp16_opt_level ) # Multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_acc, best_f1 = 0.0, 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // ( len(train_dataloader) // args.gradient_accumulation_steps ) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps ) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info( " Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch, ) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) outputs, _ = model(**inputs) loss = outputs[0] 
wandb.log({"train/loss": loss}) if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( model.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() model.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"eval/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, model, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def mb_train(args, train_dataset, encoder_q, encoder_k, dataloader, tokenizer): """Train the model""" global memory_queue encoder_q.train() total_params = sum(p.numel() for p in encoder_q.parameters()) total_trainable_params = sum( p.numel() for p in encoder_q.parameters() if p.requires_grad ) print("Encoder Params:", number_h(total_params)) print("Encoder Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = 
args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in encoder_q.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in encoder_q.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_f1 = 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 tr_loss, logging_loss = 0.0, 0.0 encoder_q.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue encoder_q.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids q_outputs, q_rep = encoder_q(**inputs) # Model outputs are always tuple in transformers (see doc). 
if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( encoder_q.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() encoder_q.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, encoder_q, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"train/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, encoder_q, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( encoder_q.module if hasattr(encoder_q, "module") else encoder_q ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. 
eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier()
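The optimizer setup in the record above groups parameters so that biases and LayerNorm weights are excluded from weight decay and pairs AdamW with a linear warmup/decay schedule (note that the record assigns weight_decay 0.01 to the excluded group, whereas the usual pattern is 0.0). A minimal, self-contained sketch of that pattern using only torch; the function name and default hyperparameters are illustrative, not taken from the record:

import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR


def build_optimizer_and_scheduler(model: nn.Module, lr=2e-5, weight_decay=0.01,
                                  warmup_steps=100, total_steps=1000):
    # Decoupled weight decay: biases and LayerNorm weights are excluded.
    no_decay = ("bias", "LayerNorm.weight")
    grouped = [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    optimizer = torch.optim.AdamW(grouped, lr=lr, eps=1e-8)

    # Linear warmup to the base learning rate, then linear decay to zero.
    def lr_lambda(step):
        if step < warmup_steps:
            return step / max(1, warmup_steps)
        return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

    return optimizer, LambdaLR(optimizer, lr_lambda)


optimizer, scheduler = build_optimizer_and_scheduler(nn.Linear(16, 2))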
processor = processors[task]()
0
2023-10-24 14:03:11+00:00
16k
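The inner training loop of the same record divides the loss by the number of gradient-accumulation steps, calls backward on every micro-batch, and only clips gradients and steps the optimizer once per accumulation window. A condensed sketch of that control flow; the classification loss and the (inputs, targets) batch format are illustrative assumptions:

import torch
from torch import nn


def train_with_accumulation(model, dataloader, optimizer, scheduler=None,
                            accumulation_steps=4, max_grad_norm=1.0):
    model.train()
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(dataloader):
        loss = nn.functional.cross_entropy(model(inputs), targets)
        # Scale so the accumulated gradient equals the average over the window.
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
            optimizer.zero_grad()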
deforum-studio/deforum
src/deforum/models/depth_models/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
[ { "identifier": "DepthModel", "path": "src/deforum/models/depth_models/zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cuda'\n \n def to(self, device) -> nn.Module:\n self.device = device\n return super().to(device)\n \n def forward(self, x, *args, **kwargs):\n raise NotImplementedError\n \n def _infer(self, x: torch.Tensor):\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n return self(x)['metric_depth']\n \n def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode=\"reflect\", **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with padding augmentation\n Padding augmentation fixes the boundary artifacts in the output depth map.\n Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.\n This augmentation pads the input image and crops the prediction back to the original size / view.\n\n Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to pad the input or not. Defaults to True.\n fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.\n fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.\n upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.\n padding_mode (str, optional): padding mode. Defaults to \"reflect\".\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # assert x is nchw and c = 3\n assert x.dim() == 4, \"x must be 4 dimensional, got {}\".format(x.dim())\n assert x.shape[1] == 3, \"x must have 3 channels, got {}\".format(x.shape[1])\n\n if pad_input:\n assert fh > 0 or fw > 0, \"atlease one of fh and fw must be greater than 0\"\n pad_h = int(np.sqrt(x.shape[2]/2) * fh)\n pad_w = int(np.sqrt(x.shape[3]/2) * fw)\n padding = [pad_w, pad_w]\n if pad_h > 0:\n padding += [pad_h, pad_h]\n \n x = F.pad(x, padding, mode=padding_mode, **kwargs)\n out = self._infer(x)\n if out.shape[-2:] != x.shape[-2:]:\n out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)\n if pad_input:\n # crop to the original size, handling the case where pad_h and pad_w is 0\n if pad_h > 0:\n out = out[:, :, pad_h:-pad_h,:]\n if pad_w > 0:\n out = out[:, :, :, pad_w:-pad_w]\n return out\n \n def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with horizontal flip augmentation\n Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. 
Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # infer with horizontal flip and average\n out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)\n out = (out + torch.flip(out_flip, dims=[3])) / 2\n return out\n \n def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n if with_flip_aug:\n return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)\n else:\n return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n \n @torch.no_grad()\n def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str=\"numpy\", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:\n \"\"\"\n Inference interface for the model for PIL image\n Args:\n pil_img (PIL.Image.Image): input PIL image\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to \"numpy\".\n \"\"\"\n x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)\n out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)\n if output_type == \"numpy\":\n return out_tensor.squeeze().cpu().numpy()\n elif output_type == \"pil\":\n # uint16 is required for depth pil image\n out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)\n return Image.fromarray(out_16bit_numpy)\n elif output_type == \"tensor\":\n return out_tensor.squeeze().cpu()\n else:\n raise ValueError(f\"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'\")" }, { "identifier": "MidasCore", "path": "src/deforum/models/depth_models/zoedepth/models/base_models/midas.py", "snippet": "class MidasCore(nn.Module):\n def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,\n img_size=384, **kwargs):\n \"\"\"Midas Base model used for multi-scale feature extraction.\n\n Args:\n midas (torch.nn.Module): Midas model.\n trainable (bool, optional): Train midas model. Defaults to False.\n fetch_features (bool, optional): Extract multi-scale features. Defaults to True.\n layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').\n freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.\n keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.\n img_size (int, tuple, optional): Input resolution. 
Defaults to 384.\n \"\"\"\n super().__init__()\n self.core = midas\n self.output_channels = None\n self.core_out = {}\n self.trainable = trainable\n self.fetch_features = fetch_features\n # midas.scratch.output_conv = nn.Identity()\n self.handles = []\n # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']\n self.layer_names = layer_names\n\n self.set_trainable(trainable)\n self.set_fetch_features(fetch_features)\n\n self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,\n img_size=img_size, do_resize=kwargs.get('do_resize', True))\n\n if freeze_bn:\n self.freeze_bn()\n\n def set_trainable(self, trainable):\n self.trainable = trainable\n if trainable:\n self.unfreeze()\n else:\n self.freeze()\n return self\n\n def set_fetch_features(self, fetch_features):\n self.fetch_features = fetch_features\n if fetch_features:\n if len(self.handles) == 0:\n self.attach_hooks(self.core)\n else:\n self.remove_hooks()\n return self\n\n def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n self.trainable = False\n return self\n\n def unfreeze(self):\n for p in self.parameters():\n p.requires_grad = True\n self.trainable = True\n return self\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n return self\n\n def forward(self, x, denorm=False, return_rel_depth=False):\n with torch.no_grad():\n if denorm:\n x = denormalize(x)\n x = self.prep(x)\n # print(\"Shape after prep: \", x.shape)\n\n with torch.set_grad_enabled(self.trainable):\n\n # print(\"Input size to Midascore\", x.shape)\n rel_depth = self.core(x)\n # print(\"Output from midas shape\", rel_depth.shape)\n if not self.fetch_features:\n return rel_depth\n out = [self.core_out[k] for k in self.layer_names]\n\n if return_rel_depth:\n return rel_depth, out\n return out\n\n def get_rel_pos_params(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" in name:\n yield p\n\n def get_enc_params_except_rel_pos(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" not in name:\n yield p\n\n def freeze_encoder(self, freeze_rel_pos=False):\n if freeze_rel_pos:\n for p in self.core.pretrained.parameters():\n p.requires_grad = False\n else:\n for p in self.get_enc_params_except_rel_pos():\n p.requires_grad = False\n return self\n\n def attach_hooks(self, midas):\n if len(self.handles) > 0:\n self.remove_hooks()\n if \"out_conv\" in self.layer_names:\n self.handles.append(list(midas.scratch.output_conv.children())[\n 3].register_forward_hook(get_activation(\"out_conv\", self.core_out)))\n if \"r4\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet4.register_forward_hook(\n get_activation(\"r4\", self.core_out)))\n if \"r3\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet3.register_forward_hook(\n get_activation(\"r3\", self.core_out)))\n if \"r2\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet2.register_forward_hook(\n get_activation(\"r2\", self.core_out)))\n if \"r1\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet1.register_forward_hook(\n get_activation(\"r1\", self.core_out)))\n if \"l4_rn\" in self.layer_names:\n self.handles.append(midas.scratch.layer4_rn.register_forward_hook(\n get_activation(\"l4_rn\", self.core_out)))\n\n return self\n\n def remove_hooks(self):\n for h in self.handles:\n h.remove()\n return self\n\n def __del__(self):\n self.remove_hooks()\n\n def set_output_channels(self, model_type):\n 
self.output_channels = MIDAS_SETTINGS[model_type]\n\n @staticmethod\n def build(midas_model_type=\"DPT_BEiT_L_384\", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):\n if midas_model_type not in MIDAS_SETTINGS:\n raise ValueError(\n f\"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}\")\n if \"img_size\" in kwargs:\n kwargs = MidasCore.parse_img_size(kwargs)\n img_size = kwargs.pop(\"img_size\", [384, 384])\n print(\"img_size\", img_size)\n midas = torch.hub.load(\"intel-isl/MiDaS\", midas_model_type,\n pretrained=use_pretrained_midas, force_reload=force_reload)\n kwargs.update({'keep_aspect_ratio': force_keep_ar})\n midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,\n freeze_bn=freeze_bn, img_size=img_size, **kwargs)\n midas_core.set_output_channels(midas_model_type)\n return midas_core\n\n @staticmethod\n def build_from_config(config):\n return MidasCore.build(**config)\n\n @staticmethod\n def parse_img_size(config):\n assert 'img_size' in config\n if isinstance(config['img_size'], str):\n assert \",\" in config['img_size'], \"img_size should be a string with comma separated img_size=H,W\"\n config['img_size'] = list(map(int, config['img_size'].split(\",\")))\n assert len(\n config['img_size']) == 2, \"img_size should be a string with comma separated img_size=H,W\"\n elif isinstance(config['img_size'], int):\n config['img_size'] = [config['img_size'], config['img_size']]\n else:\n assert isinstance(config['img_size'], list) and len(\n config['img_size']) == 2, \"img_size should be a list of H,W\"\n return config" }, { "identifier": "AttractorLayer", "path": "src/deforum/models/depth_models/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] 
# n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers" }, { "identifier": "AttractorLayerUnnormed", "path": "src/deforum/models/depth_models/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. 
Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers" }, { "identifier": "ConditionalLogBinomial", "path": "src/deforum/models/depth_models/zoedepth/models/layers/dist_layers.py", "snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)" }, { "identifier": "Projector", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. 
Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)" }, { "identifier": "SeedBinRegressor", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers" }, { "identifier": "SeedBinRegressorUnnormed", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers" }, { "identifier": "PatchTransformerEncoder", "path": "src/deforum/models/depth_models/zoedepth/models/layers/patch_transformer.py", "snippet": "class PatchTransformerEncoder(nn.Module):\n def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False):\n \"\"\"ViT-like transformer block\n\n Args:\n in_channels (int): Input channels\n patch_size (int, optional): patch size. Defaults to 10.\n embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.\n num_heads (int, optional): number of attention heads. 
Defaults to 4.\n use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as \"class token\"). Defaults to False.\n \"\"\"\n super(PatchTransformerEncoder, self).__init__()\n self.use_class_token = use_class_token\n encoder_layers = nn.TransformerEncoderLayer(\n embedding_dim, num_heads, dim_feedforward=1024)\n self.transformer_encoder = nn.TransformerEncoder(\n encoder_layers, num_layers=4) # takes shape S,N,E\n\n self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,\n kernel_size=patch_size, stride=patch_size, padding=0)\n \n def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'):\n \"\"\"Generate positional encodings\n\n Args:\n sequence_length (int): Sequence length\n embedding_dim (int): Embedding dimension\n\n Returns:\n torch.Tensor SBE: Positional encodings\n \"\"\"\n position = torch.arange(\n 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1)\n index = torch.arange(\n 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0)\n div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))\n pos_encoding = position * div_term\n pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)\n pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1)\n return pos_encoding\n \n\n def forward(self, x):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Input feature tensor\n\n Returns:\n torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim\n \"\"\"\n embeddings = self.embedding_convPxP(x).flatten(\n 2) # .shape = n,c,s = n, embedding_dim, s\n if self.use_class_token:\n # extra special token at start ?\n embeddings = nn.functional.pad(embeddings, (1, 0))\n \n # change to S,N,E format required by transformer\n embeddings = embeddings.permute(2, 0, 1)\n S, N, E = embeddings.shape\n embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device)\n x = self.transformer_encoder(embeddings) # .shape = S, N, E\n return x" }, { "identifier": "load_state_from_resource", "path": "src/deforum/models/depth_models/zoedepth/models/model_io.py", "snippet": "def load_state_from_resource(model, resource: str):\n \"\"\"Loads weights to the model from a given resource. A resource can be of following types:\n 1. URL. Prefixed with \"url::\"\n e.g. url::http(s)://url.resource.com/ckpt.pt\n\n 2. Local path. Prefixed with \"local::\"\n e.g. local::/path/to/ckpt.pt\n\n\n Args:\n model (torch.nn.Module): Model\n resource (str): resource string\n\n Returns:\n torch.nn.Module: Model with loaded weights\n \"\"\"\n print(f\"Using pretrained resource {resource}\")\n\n if resource.startswith('url::'):\n url = resource.split('url::')[1]\n return load_state_dict_from_url(model, url, progress=True)\n\n elif resource.startswith('local::'):\n path = resource.split('local::')[1]\n return load_wts(model, path)\n \n else:\n raise ValueError(\"Invalid resource type, only url:: and local:: are supported\")" } ]
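The DepthModel snippet in the context above suppresses boundary artifacts by reflect-padding the input by roughly sqrt(H/2)*fh and sqrt(W/2)*fw pixels, cropping the prediction back, and averaging with a horizontally flipped pass. A minimal sketch of that inference wrapper; `model` is any callable mapping a (B, 3, H, W) tensor to a (B, 1, H', W') map, and the dummy model at the end is only for shape checking:

import numpy as np
import torch
import torch.nn.functional as F


def infer_with_pad_and_flip(model, x, fh=3.0, fw=3.0, padding_mode="reflect"):
    """x: (B, 3, H, W). Pad, run the model, crop back, and average with a flipped pass."""
    _, _, h, w = x.shape
    pad_h = int(np.sqrt(h / 2) * fh)
    pad_w = int(np.sqrt(w / 2) * fw)

    def one_pass(inp):
        padded = F.pad(inp, (pad_w, pad_w, pad_h, pad_h), mode=padding_mode)
        out = model(padded)
        if out.shape[-2:] != padded.shape[-2:]:
            out = F.interpolate(out, size=padded.shape[-2:], mode="bicubic",
                                align_corners=False)
        # Crop the padding back off the prediction.
        return out[..., pad_h:out.shape[-2] - pad_h, pad_w:out.shape[-1] - pad_w]

    pred = one_pass(x)
    pred_flip = one_pass(torch.flip(x, dims=[3]))
    return 0.5 * (pred + torch.flip(pred_flip, dims=[3]))


dummy = lambda t: t.mean(dim=1, keepdim=True)  # stand-in "depth" model for shape checking
depth = infer_with_pad_and_flip(dummy, torch.rand(1, 3, 192, 256))  # (1, 1, 192, 256)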
import itertools
import torch
import torch.nn as nn
from ..depth_model import DepthModel
from ..base_models.midas import MidasCore
from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
from ..layers.dist_layers import ConditionalLogBinomial
from ..layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed)
from ..layers.patch_transformer import PatchTransformerEncoder
from ..model_io import load_state_from_resource
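The PatchTransformerEncoder in the context above builds 1-D sinusoidal positional encodings by multiplying positions with exponentially spaced frequencies and concatenating sine and cosine halves. A standalone sketch of that helper (assumes an even embedding dimension; the function name is illustrative):

import torch


def positional_encoding_1d(seq_len: int, dim: int) -> torch.Tensor:
    """Sinusoidal encodings of shape (seq_len, dim); first half sin, second half cos."""
    position = torch.arange(seq_len, dtype=torch.float32).unsqueeze(1)        # (S, 1)
    index = torch.arange(0, dim, 2, dtype=torch.float32).unsqueeze(0)         # (1, dim/2)
    div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0)) / dim))   # (1, dim/2)
    angles = position * div_term                                              # (S, dim/2)
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)           # (S, dim)


pe = positional_encoding_1d(16, 128)
assert pe.shape == (16, 128)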
11,174
b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. """ param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False 
@staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) model = ZoeDepthNK(core, **kwargs) if pretrained_resource: assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
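The bounded ("normed") seed-bin regressor used by this model turns non-negative activations into bin widths that sum to the depth range, then takes cumulative sums to obtain bin edges and centers. A sketch of just that math with the convolutional head omitted; the function name and the random input are illustrative:

import torch


def bin_centers_from_logits(logits, min_depth=1e-3, max_depth=10.0):
    """logits: (N, n_bins, H, W) non-negative activations -> per-pixel bin centers."""
    widths_normed = (logits + 1e-3) / (logits + 1e-3).sum(dim=1, keepdim=True)
    widths = (max_depth - min_depth) * widths_normed
    # Prepend min_depth so the cumulative sum starts at the lower depth bound.
    widths = torch.nn.functional.pad(widths, (0, 0, 0, 0, 1, 0),
                                     mode="constant", value=min_depth)
    edges = torch.cumsum(widths, dim=1)                       # (N, n_bins + 1, H, W)
    centers = 0.5 * (edges[:, :-1, ...] + edges[:, 1:, ...])  # (N, n_bins, H, W)
    return widths_normed, centers


_, centers = bin_centers_from_logits(torch.rand(2, 16, 8, 8))
assert centers.shape == (2, 16, 8, 8)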
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Shariq Farooq Bhat class ZoeDepthNK(DepthModel): def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, memory_efficient=False, train_midas=True, is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts. Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys: "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float) The length of this list determines the number of metric heads. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. memory_efficient (bool, optional): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to False. 
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. """ super().__init__() self.core = core self.bin_conf = bin_conf self.min_temp = min_temp self.max_temp = max_temp self.memory_efficient = memory_efficient self.train_midas = train_midas self.is_midas_pretrained = is_midas_pretrained self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.inverse_midas = inverse_midas N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] # self.scales = [16, 8, 4, 2] # spatial scale factors self.conv2 = nn.Conv2d( btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) # Transformer classifier on the bottleneck self.patch_transformer = PatchTransformerEncoder( btlnck_features, 1, 128, use_class_token=True) self.mlp_classifier = nn.Sequential( nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 2) ) if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.bin_centers_type = bin_centers_type # We have bins for each bin conf. # Create a map (ModuleDict) of 'name' -> seed_bin_regressor self.seed_bin_regressors = nn.ModuleDict( {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim // 2, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for conf in bin_conf} ) self.seed_projector = Projector( btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) self.projectors = nn.ModuleList([ Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) for num_out in num_out_features ]) # Create a map (ModuleDict) of 'name' -> attractors (ModuleList) self.attractors = nn.ModuleDict( {conf['name']: nn.ModuleList([ Attractor(bin_embedding_dim, n_attractors[i], mlp_dim=bin_embedding_dim, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type, memory_efficient=memory_efficient, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for i in range(len(n_attractors)) ]) for conf in bin_conf} ) last_in = N_MIDAS_OUT # conditional log binomial for each bin conf self.conditional_log_binomial = nn.ModuleDict( {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) for conf in bin_conf} ) def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): """ Args: x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain. 
return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False. denorm (bool, optional): Whether to denormalize the input image. Defaults to False. return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False. Returns: dict: Dictionary of outputs with keys: - "rel_depth": Relative depth map of shape (B, 1, H, W) - "metric_depth": Metric depth map of shape (B, 1, H, W) - "domain_logits": Domain logits of shape (B, 2) - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True - "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True """ b, c, h, w = x.shape self.orig_input_width = w self.orig_input_height = h rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) outconv_activation = out[0] btlnck = out[1] x_blocks = out[2:] x_d0 = self.conv2(btlnck) x = x_d0 # Predict which path to take embedding = self.patch_transformer(x)[0] # N, E domain_logits = self.mlp_classifier(embedding) # N, 2 domain_vote = torch.softmax(domain_logits.sum( dim=0, keepdim=True), dim=-1) # 1, 2 # Get the path bin_conf_name = ["nyu", "kitti"][torch.argmax( domain_vote, dim=-1).squeeze().item()] try: conf = [c for c in self.bin_conf if c["name"] == bin_conf_name][0] except IndexError: raise ValueError( f"bin_conf_name {bin_conf_name} not found in bin_confs") min_depth = conf['min_depth'] max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] _, seed_b_centers = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
""" param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) model = ZoeDepthNK(core, **kwargs) if pretrained_resource: assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
model = load_state_from_resource(model, pretrained_resource)
9
2023-10-28 14:23:27+00:00
16k
samholt/ActiveObservingInContinuous-timeControl
mppi_dataset_collector.py
[ { "identifier": "dotdict", "path": "config.py", "snippet": "class dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__" }, { "identifier": "create_env", "path": "overlay.py", "snippet": "def create_env(env_name, dt=0.05, ts_grid=\"fixed\", noise=0.0, friction=False, device=device):\n if \"oderl\" in env_name:\n env = create_oderl_env(env_name, dt=dt, ts_grid=ts_grid, noise=noise, friction=friction, device=device)\n else:\n env = gym.make(env_name)\n return env" }, { "identifier": "setup_logger", "path": "overlay.py", "snippet": "def setup_logger(file, log_folder=\"logs\", return_path_to_log=False):\n import logging\n import os\n import time\n\n file_name = os.path.basename(os.path.realpath(file)).split(\".py\")[0]\n from pathlib import Path\n\n Path(f\"./{log_folder}\").mkdir(parents=True, exist_ok=True)\n path_run_name = \"{}-{}\".format(file_name, time.strftime(\"%Y%m%d-%H%M%S\"))\n logging.basicConfig(\n format=\"%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s\",\n handlers=[\n logging.FileHandler(f\"{log_folder}/{path_run_name}_log.txt\"),\n logging.StreamHandler(),\n ],\n datefmt=\"%H:%M:%S\",\n level=logging.INFO,\n )\n logger = logging.getLogger()\n logger.info(f\"Starting: Log file at: {log_folder}/{path_run_name}_log.txt\")\n if return_path_to_log:\n return logger, f\"{log_folder}/{path_run_name}_log.txt\"\n else:\n return logger" }, { "identifier": "start_virtual_display", "path": "overlay.py", "snippet": "def start_virtual_display():\n import pyvirtualdisplay\n\n return pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()" }, { "identifier": "step_env", "path": "overlay.py", "snippet": "def step_env(env, action, obs_noise):\n at = torch.from_numpy(action).to(device)\n\n def g(state, t):\n return at\n\n returns = env.integrate_system(2, g, s0=torch.tensor(env.state).to(device), return_states=True)\n state = returns[-1][-1]\n reward = returns[2][-1]\n tsn = returns[-2][-1, -1]\n env.set_state_(state.cpu().numpy())\n state_out = env.get_obs()\n state_out = torch.from_numpy(state_out).to(device)\n state_out += torch.randn_like(state_out) * obs_noise\n env.time_step += 1\n done = True if env.time_step >= env.n_steps else False\n state_out = state_out.cpu().numpy()\n return state_out, reward, done, tsn" }, { "identifier": "MPPI", "path": "planners/mppi.py", "snippet": "class MPPI:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n step_dependent_dynamics=False,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt=0.05,\n sample_null_action=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state 
dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param step_dependent_dynamics: whether the passed in dynamics needs horizon step passed in (as 3rd arg)\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.step_dependency = step_dependent_dynamics\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = 
rollout_var_discount\n\n # sampled results from last command\n self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n def _dynamics(self, state, u, t):\n return self.F(state, u, t) if self.step_dependency else self.F(state, u)\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n # shift command 1 time step\n self.U = torch.roll(self.U, -1, dims=0)\n self.U[-1] = self.u_init\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n action = self.U[: self.u_per_command]\n # reduce dimensionality if we only need the first command\n if self.u_per_command == 1:\n action = action[0]\n\n logger.debug(f\"action: {action}\")\n return action * self.u_scale\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. 
to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, t)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n states_var.append(state_var)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, states_var, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with low noise if all states have the same cost. 
With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.states_var, self.actions = self._compute_rollout_costs(\n self.perturbed_action\n )\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2))\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), t\n )\n return states[:, 1:]" }, { "identifier": "MPPIActiveObserving", "path": "planners/mppi_active_observing.py", "snippet": "class MPPIActiveObserving:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n cost_var_from_state_var=None,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n observing_var_threshold=1.0,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt_simulation=0.01,\n dt=0.05,\n sampling_policy=\"discrete_planning\",\n continuous_time_threshold=0.5,\n observing_cost=1.0,\n sample_null_action=False,\n observing_fixed_frequency=1,\n discrete_planning=False,\n discrete_interval=1,\n limit_actions_to_only_positive=False,\n fixed_continuous_planning_steps=None,\n debug_mode_return_full_cost_std=False,\n debug_mode_cp_return_continuous_reward_unc=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults 
to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dt_simulation = dt_simulation\n if discrete_planning:\n dt_plan = dt_simulation * discrete_interval\n else:\n dt_plan = dt\n self.discrete_planning = discrete_planning\n self.discrete_interval = discrete_interval\n self.limit_actions_to_only_positive = limit_actions_to_only_positive\n self.continuous_time_interval = max(int(continuous_time_threshold * discrete_interval), 1)\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt_plan\n self.observing_cost = observing_cost # Hyperparameter to be tuned\n self.observing_var_threshold = observing_var_threshold # Hyperparameter to be tuned\n self.observing_fixed_frequency = observing_fixed_frequency\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = rollout_var_discount\n\n # sampled results from last command\n 
self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n self.sampling_policy = sampling_policy\n self.cost_var_from_state_var = cost_var_from_state_var\n\n self.previous_step = 0\n self.fixed_continuous_planning_steps = fixed_continuous_planning_steps\n self.debug_mode_return_full_cost_std = debug_mode_return_full_cost_std\n self.debug_mode_cp_return_continuous_reward_unc = debug_mode_cp_return_continuous_reward_unc\n\n def _dynamics(self, state, u, ts_pred, return_var=True):\n if self.limit_actions_to_only_positive:\n u[u <= 0] = 0\n return self.F(state, u, ts_pred, return_var=return_var)\n\n def _cost_var_from_state_var(self, state_var):\n if not self.cost_var_from_state_var is None:\n return self.cost_var_from_state_var(state_var)\n else:\n return state_var.sum()\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n # states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(K, 1)\n\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, _ = self._dynamics(state_mu, u, ts_pred, return_var=False)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with 
low noise if all states have the same cost. With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.actions = self._compute_rollout_costs(self.perturbed_action)\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2)) # wonder if can remove?\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(num_rollouts, 1)\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), ts_pred\n )\n return states[:, 1:]\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n self.U = torch.zeros_like(self.U)\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n assert not torch.isnan(state).any(), \"Nan detected in state\"\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n\n # Calculate the state estimate of the reward here, then use that for planning etc.\n if self.debug_mode_cp_return_continuous_reward_unc and self.sampling_policy == \"continuous_planning\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n 
state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n # Intermediate states\n intermediate_state_count = self.discrete_interval - 1\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in.repeat(intermediate_state_count, 1),\n u.repeat(intermediate_state_count, 1),\n ts_pred_increment,\n return_var=True,\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u.repeat(intermediate_state_count, 1))\n inter_c_stds = c_increment.view(intermediate_state_count, -1).std(dim=1)\n costs_std.append(torch.cat((inter_c_stds, c.std().view(1))))\n state_mu_in = state_mu_final\n # States is K x T x nx\n costs_std_continuous = torch.cat(costs_std)[1:]\n stats = {\n \"costs_std_median\": costs_std_continuous.median().item(),\n \"costs_std_mean\": costs_std_continuous.mean().item(),\n \"costs_std_max\": costs_std_continuous.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return torch.cat(costs_std).cpu()\n elif self.sampling_policy == \"active_observing_control\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n select_actions_up_to = self.T * self.discrete_interval # Initial default value\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n if c.std() >= self.observing_var_threshold:\n t_upper = ts_pred.view(-1)[0]\n t_lower = torch.tensor(0.0).to(self.d)\n while (t_upper - t_lower) > self.dt_simulation:\n t_mid = (t_upper + t_lower) / 2.0\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in, u, torch.ones_like(ts_pred) * t_mid, return_var=True\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u)\n if c_increment.std() >= self.observing_var_threshold:\n t_upper = t_mid\n else:\n t_lower = t_mid\n select_actions_up_to = (\n t * self.discrete_interval\n + torch.floor((t_mid / ts_pred.view(-1)[0]) * self.discrete_interval).int().item()\n )\n break\n state_mu_in = state_mu_final\n stats = {}\n else:\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n states_mu = []\n states_var = []\n costs = []\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t 
in range(self.T):\n u = same_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, ts_pred, return_var=True)\n state_mu = state_mu + torch.normal(0, 1, size=state_mu.shape).to(self.d) * torch.sqrt(state_var)\n c = self._running_cost(state_mu, u)\n if self.M > 1: # Untested, however should underperform - MPPI with uncertaintity paper\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n costs.append(c)\n states_mu.append(state_mu)\n states_var.append(state_var)\n\n # States is K x T x nx\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n costs = torch.stack(costs, dim=-2)\n costs_std_discrete = torch.cat(\n (torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1))\n )[1:]\n stats = {\n \"costs_std_median\": costs_std_discrete.median().item(),\n \"costs_std_mean\": costs_std_discrete.mean().item(),\n \"costs_std_max\": costs_std_discrete.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return (\n torch.cat((torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1)))\n .repeat_interleave(self.discrete_interval)\n .cpu()\n )\n\n if self.sampling_policy == \"discrete_monitoring\":\n actions = self.U[costs_std_discrete < self.observing_var_threshold]\n if actions.shape[0] == 0:\n actions = self.U[: self.u_per_command]\n costs_std_discrete = costs_std_discrete[: self.u_per_command]\n else:\n costs_std_discrete = costs_std_discrete[costs_std_discrete < self.observing_var_threshold]\n elif self.sampling_policy == \"discrete_planning\" or self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is None:\n if not self.debug_mode_cp_return_continuous_reward_unc:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_discrete = costs_std_discrete[: self.observing_fixed_frequency]\n else:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_continuous = costs_std_continuous[\n : self.observing_fixed_frequency * self.continuous_time_interval\n ]\n costs_std_discrete = torch.tensor(0, device=self.d, dtype=self.dtype).view(1)\n else:\n actions = self.U\n costs_std_discrete = costs_std_discrete\n elif self.sampling_policy == \"active_observing_control\":\n actions = self.U\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n slice_to_take_holder = torch.zeros((actions.shape[0])).bool()\n slice_to_take_holder[:select_actions_up_to] = True\n actions = actions[slice_to_take_holder]\n if actions.shape[0] <= (self.continuous_time_interval - 1):\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n actions = self.U.repeat_interleave(self.discrete_interval, dim=0)\n actions = actions[: self.continuous_time_interval]\n else:\n self.previous_step = int(actions.shape[0] / self.discrete_interval)\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n costs_std_continuous = torch.ones_like(actions).to(self.d)\n return actions * self.u_scale, costs_std_continuous, stats\n else:\n raise NotImplementedError(f\"sampling_policy: {self.sampling_policy} not recognized\")\n self.previous_step = actions.shape[0]\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n if self.discrete_planning:\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n costs_std_discrete = costs_std_discrete.repeat_interleave(self.discrete_interval, dim=0)\n if self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is 
None:\n actions = actions[: self.continuous_time_interval]\n if not self.debug_mode_cp_return_continuous_reward_unc:\n costs_std_discrete = costs_std_discrete[: self.continuous_time_interval]\n else:\n costs_std_discrete = costs_std_continuous\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n else:\n actions = actions[: self.fixed_continuous_planning_steps]\n costs_std_discrete = costs_std_discrete[: self.fixed_continuous_planning_steps]\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n return actions * self.u_scale, costs_std_discrete, stats" } ]
import logging
import os
import time
import imageio
import numpy as np
import torch
import torch.multiprocessing as multiprocessing
from functools import partial
from tqdm import tqdm
from config import dotdict
from overlay import create_env, setup_logger, start_virtual_display, step_env
from planners.mppi import MPPI
from planners.mppi_active_observing import MPPIActiveObserving
from oracle import pendulum_dynamics_dt
from oracle import cartpole_dynamics_dt
from oracle import acrobot_dynamics_dt
from oracle import cancer_dynamics_dt
from pathlib import Path
from config import get_config, seed_all
10832
env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, episodes_per_sampler_task=10, config={}, iter_=200, change_goal_flipped_iter_=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(config) env = create_env(env_name, dt=dt, ts_grid=ts_grid, friction=config.friction) ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] if env_name == "oderl-cancer": limit_actions_to_only_positive = True else: limit_actions_to_only_positive = False nx = env.get_obs().shape[0] nu = env.action_space.shape[0] dtype = torch.float32 gamma = sigma**2 off_diagonal = 0.5 * gamma mppi_noise_sigma = torch.ones((nu, nu), device=device, dtype=dtype) * off_diagonal + torch.eye( nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") logger = logging.getLogger() def inner_mppi_with_model_collect_data( seed, model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, episodes_per_sampler_task=10, config={}, iter_=200, change_goal_flipped_iter_=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(config) env = create_env(env_name, dt=dt, ts_grid=ts_grid, friction=config.friction) ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] if env_name == "oderl-cancer": limit_actions_to_only_positive = True else: limit_actions_to_only_positive = False nx = env.get_obs().shape[0] nu = env.action_space.shape[0] dtype = torch.float32 gamma = sigma**2 off_diagonal = 0.5 * gamma mppi_noise_sigma = torch.ones((nu, nu), device=device, dtype=dtype) * off_diagonal + torch.eye( nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video:
start_virtual_display()
3
2023-10-24 16:19:14+00:00
16k
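The row above pairs a truncated prompt (cropped_code, ending at "if save_video:") with the single ground-truth continuation line (next_line, here "start_virtual_display()"), while all_code holds the full source. A minimal Python sketch of how such a row can be consumed follows, assuming each row is loaded as a plain dict keyed by the schema field names and that cropped_code is a contiguous span of all_code ending at the completion point; the helper name check_row is hypothetical, not part of any dataset tooling.

def check_row(row: dict) -> bool:
    """Check that next_line is the first non-blank line of all_code after the cropped_code span.

    Assumption: cropped_code appears verbatim inside all_code and ends at the completion point.
    """
    all_code = row["all_code"]
    cropped = row["cropped_code"]
    start = all_code.find(cropped)
    if start < 0:
        return False
    remainder = all_code[start + len(cropped):]
    # The first non-blank line after the crop point should match the labelled next_line.
    for line in remainder.splitlines():
        if line.strip():
            return line.strip() == row["next_line"].strip()
    return False

# For the row above, check_row(row) is expected to return True, since
# "start_virtual_display()" directly follows "if save_video:" in all_code.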
s1tools/s1-etad
s1etad/_jupyter_support.py
[ { "identifier": "Sentinel1Etad", "path": "s1etad/product.py", "snippet": "class Sentinel1Etad:\n \"\"\"Sentinel-1 ETAD product.\n\n Class to decode and access the elements of the Sentinel ETAD product\n which specification is governed by ETAD-DLR-PS-0014.\n\n The index operator [] (implemented with the __getitem__ method) returns\n a Sentinel1EtadSwath instance.\n\n Parameters\n ----------\n product : str or pathlib.Path\n path of the S1-ETAD product (it is a directory)\n\n Attributes\n ----------\n product : pathlib.Path\n path of the S1-ETAD product (it is a directory)\n burst_catalogue : pandas.DataFrame\n dataframe containing main information of all bursts present in\n the product\n ds : netCDF.Dataset\n (provisional) the NetCDF.Dataset in which data are stored\n \"\"\"\n\n def __init__(self, product):\n # TODO: make this read-only (property)\n self.product = pathlib.Path(product)\n # TODO: ds should not be exposed\n self.ds = self._init_measurement_dataset()\n self._annot = self._init_annotation_dataset()\n self.burst_catalogue = self._init_burst_catalogue()\n\n def _init_measurement_dataset(self):\n \"\"\"Open the nc dataset.\"\"\"\n # @TODO: retrieve form manifest\n netcdf_file = next(self.product.glob(\"measurement/*.nc\"))\n rootgrp = Dataset(netcdf_file, \"r\")\n rootgrp.set_auto_mask(False)\n return rootgrp\n\n def _init_annotation_dataset(self):\n \"\"\"Open the xml annotation dataset.\"\"\"\n list_ = [i for i in self.product.glob(\"annotation/*.xml\")]\n xml_file = str(list_[0])\n root = etree.parse(xml_file).getroot()\n return root\n\n @functools.lru_cache()\n def __getitem__(self, index):\n assert index in self.swath_list, f\"{index} is not in {self.swath_list}\"\n return Sentinel1EtadSwath(self.ds[index])\n\n def __iter__(self):\n yield from self.iter_swaths()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self.product}\") # 0x{id(self):x}'\n\n def __str__(self):\n return f'{self.__class__.__name__}(\"{self.product.name}\")'\n\n @property\n def number_of_swath(self):\n \"\"\"The number of swaths in the product.\"\"\"\n return len(self.ds.groups)\n\n @property\n def swath_list(self):\n \"\"\"The list of swath identifiers (str) in the product.\"\"\"\n return list(self.ds.groups.keys())\n\n def s1_product_list(self):\n \"\"\"Return the list of S-1 products used to compose the ETAD one.\"\"\"\n df = self.burst_catalogue\n\n # this ensures that each product name is located at the correct pIndex\n product_list = [\n item[1] for item in sorted(set(zip(df[\"pIndex\"], df[\"productID\"])))\n ]\n\n return product_list\n\n @property\n def grid_spacing(self):\n \"\"\"Return the grid spacing in meters.\"\"\"\n xp_list = {\n \"x\": \".//correctionGridRangeSampling\",\n \"y\": \".//correctionGridAzimuthSampling\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"m\"\n return dd\n\n @property\n def grid_sampling(self):\n \"\"\"Return the grid spacing in s.\"\"\"\n xp_list = {\n \"x\": \".//productInformation/gridSampling/range\",\n \"y\": \".//productInformation/gridSampling/azimuth\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"s\"\n return dd\n\n @property\n def min_azimuth_time(self):\n \"\"\"The minimum azimuth time of all bursts in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMin)\n\n @property\n def max_azimuth_time(self):\n \"\"\"The maximum azimuth time of all bursts 
in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMax)\n\n @property\n def min_range_time(self):\n \"\"\"The minimum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMin\n\n @property\n def max_range_time(self):\n \"\"\"The maximum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMax\n\n @property\n def vg(self):\n \"\"\"Mean ground velocity [m/s].\"\"\"\n try:\n xp = (\n \"productInformation/gridGroundSampling/\"\n \"averageZeroDopplerVelocity\"\n )\n vg = float(self._annot.find(xp).taxt)\n except (AttributeError, ValueError):\n vg = self.grid_spacing[\"y\"] / self.grid_sampling[\"y\"]\n return vg\n\n def processing_setting(self):\n \"\"\"Return the corrections performed.\n\n Read the xml file to identify the corrections performed.\n If a correction is not performed the matrix is filled with zeros.\n \"\"\"\n correction_list = [\n \"troposphericDelayCorrection\",\n \"ionosphericDelayCorrection\",\n \"solidEarthTideCorrection\",\n \"bistaticAzimuthCorrection\",\n \"dopplerShiftRangeCorrection\",\n \"FMMismatchAzimuthCorrection\",\n ]\n dd = {}\n xp_root = (\n \"processingInformation/processor/setapConfigurationFile/\"\n \"processorSettings/\"\n )\n for correction in correction_list:\n xp = xp_root + correction\n ret = self._xpath_to_list(self._annot, xp)\n if ret == \"true\":\n ret = True\n else:\n ret = False\n dd[correction] = ret\n return dd\n\n def _init_burst_catalogue(self):\n \"\"\"Build the burst catalog.\n\n Using information stored in the NetCDF file create a\n pandas.DataFrame containing all the elements allowing to index\n properly a burst.\n \"\"\"\n\n def _to_tdelta64(t):\n return np.float64(t * 1e9).astype(\"timedelta64[ns]\")\n\n data = collections.defaultdict(list)\n t0 = np.datetime64(self.ds.azimuthTimeMin, \"ns\")\n for swath in self.ds.groups.values():\n for burst in swath.groups.values():\n ax = burst.variables[\"azimuth\"]\n tmin = t0 + _to_tdelta64(ax[0])\n tmax = t0 + _to_tdelta64(ax[-1])\n\n data[\"bIndex\"].append(burst.bIndex)\n data[\"pIndex\"].append(burst.pIndex)\n data[\"sIndex\"].append(burst.sIndex)\n data[\"productID\"].append(burst.productID)\n data[\"swathID\"].append(burst.swathID)\n data[\"azimuthTimeMin\"].append(tmin)\n data[\"azimuthTimeMax\"].append(tmax)\n\n df = pd.DataFrame(data=data)\n\n return df\n\n def query_burst(\n self,\n first_time=None,\n product_name=None,\n last_time=None,\n swath=None,\n geometry=None,\n ):\n \"\"\"Query the burst catalogue to retrieve the burst matching by time.\n\n Parameters\n ----------\n first_time : datetime\n is set to None then set to the first time\n last_time : datetime\n if set to None the last_time = first_time\n product_name : str\n Name of a real S1 product e.g.\n S1B_IW_SLC__1SDV_20190805T162509_20190805T162...SAFE\n swath : str or list\n list of swathID e.g. 
'IW1' or ['IW1'] or ['IW1', 'IW2']\n geometry : shapely.geometry.[Point, Polygon, ...]\n A shapely geometry for which interstion will be searched\n\n Returns\n -------\n pandas.DataFrame\n Filtered panda dataframe\n \"\"\"\n # first sort the burst by time\n df = self.burst_catalogue.sort_values(by=[\"azimuthTimeMin\"])\n if first_time is None:\n first_time = df.iloc[0].azimuthTimeMin\n if last_time is None:\n last_time = df.iloc[-1].azimuthTimeMax\n\n ix0 = (df.azimuthTimeMin >= first_time) & (\n df.azimuthTimeMax <= last_time\n )\n\n if product_name is not None:\n # build a regex based on the name to avoid issues with annotation\n # products and CRC\n product_name = Sentinel1ProductName(product_name)\n product_name.to_annotation(value=\"[AS]\")\n product_name.crc = \"\"\n filter_ = product_name.recompose(with_suffix=False)\n ix0 = ix0 & self.burst_catalogue.productID.str.contains(\n filter_, regex=True\n )\n\n if swath is not None:\n if isinstance(swath, str):\n swath = [swath]\n ix0 = ix0 & df.swathID.isin(swath)\n\n if geometry is not None:\n bix_list = self.intersects(geometry)\n ix0 = ix0 & df.bIndex.isin(bix_list)\n\n return df.loc[ix0]\n\n def _selection_to_swath_list(self, selection=None):\n if selection is None:\n selection = self.burst_catalogue\n\n if isinstance(selection, pd.DataFrame):\n burst_selection = selection\n swath_list = selection.swathID.unique()\n elif isinstance(selection, str):\n burst_selection = None\n swath_list = [selection]\n else:\n # assume it is a list of swaths already\n import collections.abc\n\n assert isinstance(selection, collections.abc.Iterable)\n assert all(isinstance(item, str) for item in selection)\n burst_selection = None\n swath_list = selection\n\n return swath_list, burst_selection\n\n def iter_swaths(self, selection=None):\n \"\"\"Iterate over swaths according to the specified selection.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n \"\"\"\n swath_list, _ = self._selection_to_swath_list(selection)\n for swath_name in swath_list:\n yield self[swath_name]\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over burst according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected burst indexes or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the bursts of the product.\n \"\"\"\n if selection is None:\n selection = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n # assume it is a list of burst indexes\n bursts = selection\n if isinstance(bursts, int):\n bursts = [selection]\n # NOTE: preserve the order\n selection = self.burst_catalogue.bIndex.isin(bursts)\n\n assert isinstance(selection, pd.DataFrame)\n\n for idx, row in selection.iterrows():\n burst = self[row.swathID][row.bIndex]\n yield burst\n\n @staticmethod\n def _xpath_to_list(\n root, xpath, dtype=None, namespace=None, parse_time_func=None\n ):\n ll = [elt.text for elt in root.findall(xpath, namespace)]\n if parse_time_func is not None:\n ll = [datetime.datetime.fromisoformat(t) for t in ll]\n ll = parse_time_func(ll) # TODO: check\n ll = np.asarray(ll, dtype=dtype)\n\n if ll.size == 1:\n return ll.item(0)\n else:\n return ll\n\n def get_statistics(self, correction, meter=False):\n 
\"\"\"Return the global statistic value of the specified correction.\n\n The returned value is the pre-computed one that is stored in the\n XML annotation file of the product.\n\n Parameters\n ----------\n correction : str or ECorrectionType\n the corrections for which the statistic value is requested\n meter : bool\n if set to True then the returned value is expressed in meters,\n otherwise it is expressed in seconds (default: False)\n\n Returns\n -------\n dict\n a dictionary containing :class:`Statistics` (min, mean and max)\n for all available components of the specified correction:\n\n :x:\n a :class:`Statistics` instance relative to the range\n component of the specified correction\n :y:\n a :class:`Statistics` instance relative to the azimuth\n component of the specified correction\n :unit:\n the units of the returned statistics (\"m\" or \"s\")\n \"\"\"\n units = \"m\" if meter else \"s\"\n\n stat_xp = \"./qualityAndStatistics\"\n target = ECorrectionType(correction)\n target_tag = _STATS_TAG_MAP[target]\n\n statistics = {\"unit\": units}\n\n # NOTE: looping on element and heuristic test on tags is necessary\n # due to inconsistent naming of range and azimuth element\n # TODO: report the inconsistency to DLR? (TBD)\n correction_elem = self._annot.find(f\"{stat_xp}/{target_tag}\")\n for elem in correction_elem:\n if \"range\" in elem.tag:\n direction = \"x\"\n elif \"azimuth\" in elem.tag:\n direction = \"y\"\n else:\n continue\n\n statistics[direction] = Statistics(\n float(elem.findtext(f'min[@unit=\"{units}\"]')),\n float(elem.findtext(f'mean[@unit=\"{units}\"]')),\n float(elem.findtext(f'max[@unit=\"{units}\"]')),\n )\n\n return statistics\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = []\n swath_list, burst_selection = self._selection_to_swath_list(selection)\n for swath in self.iter_swaths(swath_list):\n polys.extend(swath.get_footprint(burst_selection))\n\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input geometry.\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of all the burst intersecting with the input shape geometry\n \"\"\"\n lists_of_burst_indexes = [\n swath.intersects(geometry) for swath in self.iter_swaths()\n ]\n # return the flattened list\n return list(itertools.chain(*lists_of_burst_indexes))\n\n def _swath_merger(\n self,\n burst_var,\n selection=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n if selection is None:\n df = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n df = self.query_burst(swath=selection)\n else:\n assert isinstance(selection, pd.DataFrame)\n df = selection\n\n # NOTE: assume a specific order of swath IDs\n 
first_swath = self[df.swathID.min()]\n near_burst = first_swath[first_swath.burst_list[0]]\n last_swath = self[df.swathID.max()]\n far_burst = last_swath[last_swath.burst_list[0]]\n\n rg_first_time = near_burst.sampling_start[\"x\"]\n rg_last_time = (\n far_burst.sampling_start[\"x\"]\n + far_burst.sampling[\"x\"] * far_burst.samples\n )\n az_first_time = df.azimuthTimeMin.min()\n az_last_time = df.azimuthTimeMax.max()\n az_ref_time = self.min_azimuth_time\n az_first_time_rel = (az_first_time - az_ref_time).total_seconds()\n\n sampling = self.grid_sampling\n dx = sampling[\"x\"]\n dy = sampling[\"y\"]\n\n num_samples = (\n np.round((rg_last_time - rg_first_time) / dx).astype(int) + 1\n )\n num_lines = (\n np.round(\n (az_last_time - az_first_time).total_seconds() / dy\n ).astype(int)\n + 1\n )\n\n img = np.full((num_lines, num_samples), fill_value=fill_value)\n # TODO: add some control option\n img = np.ma.array(img, mask=True, fill_value=fill_value)\n\n for swath in self.iter_swaths(df):\n # NOTE: use the private \"Sentinel1EtadSwath._burst_merger\" method\n # to be able to work only on the specified NetCDF variable\n dd_ = swath._burst_merger(\n burst_var,\n selection=df, # noqa\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n yoffset = dd_[\"first_azimuth_time\"] - az_first_time_rel\n xoffset = dd_[\"first_slant_range_time\"] - rg_first_time\n line_ofs = np.round(yoffset / dy).astype(int)\n sample_ofs = np.round(xoffset / dx).astype(int)\n\n slice_y = slice(line_ofs, line_ofs + dd_[burst_var].shape[0])\n slice_x = slice(sample_ofs, sample_ofs + dd_[burst_var].shape[1])\n\n img[slice_y, slice_x] = dd_[burst_var]\n\n return {\n burst_var: img,\n \"first_azimuth_time\": az_first_time,\n \"first_slant_range_time\": rg_first_time,\n \"sampling\": sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._swath_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n\n # To compute lat/lon/h make a new selection with all gaps filled\n swath_list, _ = self._selection_to_swath_list(selection)\n near_swath = min(swath_list)\n far_swath = max(swath_list)\n idx = self.burst_catalogue.swathID >= near_swath\n idx &= self.burst_catalogue.swathID <= far_swath\n swaths = self.burst_catalogue.swathID[idx].unique()\n\n data = dd[\"x\" if \"x\" in prm_list else \"y\"]\n lines = data.shape[0]\n duration = lines * self.grid_sampling[\"y\"]\n duration = np.float64(duration * 1e9).astype(\"timedelta64[ns]\")\n first_time = dd[\"first_azimuth_time\"]\n last_time = first_time + duration\n\n filled_selection = self.query_burst(\n first_time=first_time, last_time=last_time, swath=swaths\n )\n\n dd[\"lats\"] = self._swath_merger(\n \"lats\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lats\"]\n dd[\"lons\"] = self._swath_merger(\n \"lons\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lons\"]\n dd[\"height\"] = self._swath_merger(\n \"height\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple swaths of the specified correction variable.\n\n Data of the selected swaths (typically overlapped) are merged\n together to form a single data matrix with a consistent (range and\n azimuth) time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected swaths and bursts and stitches correction\n data together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadSwath", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadSwath:\n \"\"\"Object representing a swath in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentine1Etad` class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n\n @functools.lru_cache()\n def __getitem__(self, burst_index):\n burst_name = f\"Burst{burst_index:04d}\"\n return Sentinel1EtadBurst(self._grp[burst_name])\n\n def __iter__(self):\n yield from self.iter_bursts()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def burst_list(self):\n \"\"\"The list of burst identifiers (str) of all bursts in the swath.\"\"\"\n return [burst.bIndex for burst in self._grp.groups.values()]\n\n @property\n def number_of_burst(self):\n \"\"\"The number of bursts in the swath.\"\"\"\n return len(self._grp.groups)\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str).\"\"\"\n return self._grp.swathID\n\n @property\n def swath_index(self):\n 
\"\"\"The swath index (int).\"\"\"\n return self._grp.sIndex\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling_start\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling\n\n def _selection_to_burst_index_list(self, selection=None):\n if selection is None:\n index_list = self.burst_list\n elif isinstance(selection, pd.DataFrame):\n idx = selection.swathID == self.swath_id\n index_list = selection.bIndex[idx].values\n else:\n index_list = selection\n return index_list\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over bursts according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n \"\"\"\n index_list = self._selection_to_burst_index_list(selection)\n for burst_index in index_list:\n yield self[burst_index]\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = [\n burst.get_footprint() for burst in self.iter_bursts(selection)\n ]\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input Geometry\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of the indexes of all bursts intersecting with the input\n geometry\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"The input shape is not a shapely BaseGeometry object\"\n burst_index_list = []\n swath_footprint = self.get_footprint(merge=True)\n if swath_footprint.intersects(geometry):\n burst_index_list = [\n b.burst_index\n for b in self.iter_bursts()\n if b.intersects(geometry)\n ]\n return burst_index_list\n\n def _burst_merger(\n self,\n burst_var,\n selection=None,\n az_time_min=None,\n az_time_max=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n \"\"\"Low level method to de-burst a NetCDF variable.\n\n The de-burst strategy is simple as the latest line is on top of the\n oldest.\n\n Parameters\n ----------\n burst_var : str\n one of the burst netcdf variables\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n az_time_min : float\n minimum azimuth time of the merged swath\n (relative to 
the reference annotated in the NetCDF root)\n az_time_max : float\n maximum azimuth tim eof the merged swath\n (relative to the reference annotated in the NetCDF root)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>: merged data for the selected burst_var\n :first_azimuth_time: the relative azimuth first time\n :first_slant_range_time: the relative (slant) range first time\n :sampling: a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n \"\"\"\n burst_index_list = self._selection_to_burst_index_list(selection)\n\n # Find what is the extent of the acquisition in azimuth\n first_burst = self[burst_index_list[0]]\n last_burst = self[burst_index_list[-1]]\n\n if az_time_min is None:\n t0 = first_burst.sampling_start[\"y\"]\n else:\n t0 = az_time_min\n\n last_azimuth, _ = last_burst.get_burst_grid()\n if az_time_max is None:\n t1 = last_azimuth[-1]\n else:\n t1 = az_time_max\n\n tau0 = min(\n burst.sampling_start[\"x\"]\n for burst in self.iter_bursts(burst_index_list)\n )\n\n # grid sampling\n dt = first_burst.sampling[\"y\"]\n dtau = first_burst.sampling[\"x\"]\n\n num_lines = np.round((t1 - t0) / dt).astype(int) + 1\n num_samples = max(\n burst.samples for burst in self.iter_bursts(burst_index_list)\n )\n\n debursted_var = np.full(\n (num_lines, num_samples), fill_value=fill_value\n )\n # TODO: add some control option\n debursted_var = np.ma.array(\n debursted_var, mask=True, fill_value=fill_value\n )\n\n for burst_ in self.iter_bursts(burst_index_list):\n assert (\n dt == burst_.sampling[\"y\"]\n ), \"The azimuth sampling is changing long azimuth\"\n assert (\n first_burst.sampling_start[\"x\"] == burst_.sampling_start[\"x\"]\n ), \"The 2-way range gridStartRangeTime is changing long azimuth\"\n\n # get the timing of the burst and convert into line index\n az_time_, rg_time_ = burst_.get_burst_grid()\n line_index_ = np.round((az_time_ - t0) / dt).astype(int)\n p0 = np.round((rg_time_[0] - tau0) / dtau).astype(int)\n\n # NOTE: use the private \"Sentinel1EtadBurst._get_etad_param\" method\n # to be able to work only on the specified NetCDF variable\n var_ = burst_._get_etad_param(\n burst_var, set_auto_mask=set_auto_mask, meter=meter # noqa\n )\n\n _, burst_samples = var_.shape\n debursted_var[line_index_, p0 : p0 + burst_samples] = var_\n\n return {\n burst_var: debursted_var,\n \"first_azimuth_time\": t0,\n \"first_slant_range_time\": first_burst.sampling_start[\"x\"],\n \"sampling\": first_burst.sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._burst_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n dd[\"lats\"] = self._burst_merger(\n \"lats\", set_auto_mask=set_auto_mask, meter=False\n )[\"lats\"]\n dd[\"lons\"] = self._burst_merger(\n \"lons\", set_auto_mask=set_auto_mask, meter=False\n )[\"lons\"]\n dd[\"height\"] = self._burst_merger(\n \"height\", set_auto_mask=set_auto_mask, meter=False\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple bursts of the specified correction variable.\n\n Data of the selected bursts (typically overlapped) are merged\n together to form a single data matrix with a consistent (azimuth)\n time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected bursts and stitches correction data\n together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadBurst", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadBurst:\n \"\"\"Object representing a burst in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentinel1EtadSwath`\n class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n self._geocoder = None\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def product_id(self):\n \"\"\"The S1 product (str) to which the burst object is associated.\"\"\"\n return self._grp.productID\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str) to which the burst belongs.\"\"\"\n return self._grp.swathID\n\n @property\n def burst_id(self):\n \"\"\"The burst identifier (str).\"\"\"\n return self._grp.name\n\n @property\n def product_index(self):\n \"\"\"Index (int) of the S1 product to which the burst is associated.\"\"\"\n return self._grp.pIndex\n\n @property\n def swath_index(self):\n \"\"\"The index (int) of the swath to which the burst belongs.\"\"\"\n return 
self._grp.sIndex\n\n @property\n def burst_index(self):\n \"\"\"The index (int) of the burst.\"\"\"\n return self._grp.bIndex\n\n @functools.lru_cache()\n def get_footprint(self):\n \"\"\"Return the footprint of ghe bursts as shapely.Polygon.\n\n It gets the lat/lon/height grid and extract the 4 corners.\n \"\"\"\n lats, lons, heights = self.get_lat_lon_height()\n corner_list = [(0, 0), (0, -1), (-1, -1), (-1, 0)]\n etaf_burst_footprint = []\n for corner in corner_list:\n lat_, lon_, h_ = lats[corner], lons[corner], heights[corner]\n etaf_burst_footprint.append((lon_, lat_, h_))\n etaf_burst_footprint = Polygon(etaf_burst_footprint)\n return etaf_burst_footprint\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Intersects the footprint of the burst with the provided shape\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n bool\n True if intersects, False otherwise\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"Not a shapely BaseGeometry object\"\n return self.get_footprint().intersects(geometry)\n\n def get_burst_grid(self):\n \"\"\"Return the t, tau grid of the burst.\"\"\"\n azimuth = self._get_etad_param(\"azimuth\", set_auto_mask=True)\n range_ = self._get_etad_param(\"range\", set_auto_mask=True)\n return azimuth, range_\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\n\n Value in seconds relative to the beginning of the product.\n \"\"\"\n # TODO: put a reference in the docstring to the proper\n # Sentinel1Etad property.\n return dict(\n x=self._grp.gridStartRangeTime,\n y=self._grp.gridStartAzimuthTime,\n units=\"s\",\n )\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n return dict(\n x=self._grp.gridSamplingRange,\n y=self._grp.gridSamplingAzimuth,\n units=\"s\",\n )\n\n @property\n def lines(self):\n \"\"\"The number of lines in the burst.\"\"\"\n return self._grp.dimensions[\"azimuthExtent\"].size\n\n @property\n def samples(self):\n \"\"\"The number of samples in the burst.\"\"\"\n return self._grp.dimensions[\"rangeExtent\"].size\n\n @property\n def vg(self) -> float:\n \"\"\"Average zero-Doppler ground velocity [m/s].\"\"\"\n return self._grp.averageZeroDopplerVelocity\n\n @property\n def reference_polarization(self) -> str:\n \"\"\"Reverence polarization (string).\"\"\"\n return self._grp.referencePolarization\n\n def get_polarimetric_channel_offset(self, channel: str) -> dict:\n \"\"\"Polarimetric channel delay.\n\n Return the electronic delay of the specified polarimetric channel\n w.r.t. 
the reference one (see\n :data:`Sentinel1EtadBurst.reference_polarization`).\n\n channel : str\n the string ID of the requested polarimetric channel:\n * 'VV' or 'VH' for DV products\n * 'HH' or 'HV' for DH products\n \"\"\"\n if channel not in {\"HH\", \"HV\", \"VV\", \"VH\"}:\n raise ValueError(f\"invalid channel ID: {channel!r}\")\n\n if channel[0] != self._grp.referencePolarization[0]:\n raise ValueError(\n f\"polarimetric channel not available: {channel!r}\"\n )\n\n data = dict(units=\"s\")\n\n if channel == \"HH\":\n data[\"x\"] = (self._grp.rangeOffsetHH,)\n data[\"y\"] = (self._grp.rangeOffsetHH,)\n elif channel == \"HV\":\n data[\"x\"] = (self._grp.rangeOffsetHV,)\n data[\"y\"] = (self._grp.rangeOffsetHV,)\n elif channel == \"VH\":\n data[\"x\"] = (self._grp.rangeOffsetVH,)\n data[\"y\"] = (self._grp.rangeOffsetVH,)\n elif channel == \"VV\":\n data[\"x\"] = (self._grp.rangeOffsetVV,)\n data[\"y\"] = (self._grp.rangeOffsetVV,)\n\n return data\n\n def get_timing_calibration_constants(self) -> dict:\n try:\n return dict(\n x=self._grp.instrumentTimingCalibrationRange,\n y=self._grp.instrumentTimingCalibrationAzimuth,\n units=\"s\",\n )\n except AttributeError:\n # @COMPATIBILITY: with SETAP , v1.6\n warnings.warn(\n \"instrument timing calibration constants are not available \"\n \"in the NetCDF data component this product. \"\n \"Calibration constants have been added to the NetCDF \"\n \"component in SETAP v1.6 (ETAD-DLR-PS-0014 - \"\n '\"ETAD Product Format Specification\" Issue 1.5).'\n )\n return dict(x=0, y=0, units=\"s\")\n\n def _get_etad_param(\n self, name, set_auto_mask=False, transpose=False, meter=False\n ):\n assert (\n name in self._grp.variables\n ), f\"Parameter {name!r} is not allowed\"\n\n self._grp.set_auto_mask(set_auto_mask)\n\n # TODO: avoid double copies\n # TODO: decimation factor\n field = np.asarray(self._grp[name])\n if transpose:\n field = np.transpose(field)\n\n if meter:\n if name.endswith(\"Az\"):\n k = self._grp.averageZeroDopplerVelocity\n elif name.endswith(\"Rg\"):\n k = constants.c / 2\n else:\n # it is not a correction (azimuth, range, lats, lons, height)\n k = 1\n warnings.warn(\n f\"the {name} is not a correction: \"\n 'the \"meter\" parameter will be ignored'\n )\n field *= k\n\n return field\n\n def get_lat_lon_height(self, transpose=False):\n \"\"\"Return the latitude, longitude and height for each point.\n\n Data are returned as (3) matrices (lines x samples).\n Latitude and longitude are expressed in degrees, height is\n expressed in meters.\n \"\"\"\n lats = self._get_etad_param(\n \"lats\", transpose=transpose, meter=False, set_auto_mask=True\n )\n lons = self._get_etad_param(\n \"lons\", transpose=transpose, meter=False, set_auto_mask=True\n )\n h = self._get_etad_param(\n \"height\", transpose=transpose, meter=False, set_auto_mask=True\n )\n return lats, lons, h\n\n def _core_get_correction(\n self, prm_list, set_auto_mask=False, transpose=False, meter=False\n ):\n correction = {}\n for dim, field in prm_list.items():\n correction[dim] = self._get_etad_param(\n field,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n\n correction[\"unit\"] = \"m\" if meter else \"s\"\n\n return correction\n\n def get_correction(\n self,\n name: CorrectionType = ECorrectionType.SUM,\n set_auto_mask=False,\n transpose=False,\n meter=False,\n direction=None,\n ):\n \"\"\"Retrieve the correction for the specified correction \"name\".\n\n Puts the results in a dict.\n\n Parameters\n ----------\n name : ECorrectionType or str\n the 
desired correction\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n transpose : bool\n requested to retrieve the correction in array following the\n numpy convention for dimensions (default: False)\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing the following items for the\n requested correction:\n\n :x: correction in range (if applicable)\n :y: correction in azimuth (if applicable)\n :unit: 'm' or 's'\n :name: name of the correction\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n name = correction_type.value\n prm_list = _CORRECTION_NAMES_MAP[name]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_get_correction(\n prm_list,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n correction[\"name\"] = name\n return correction\n\n def _get_geocoder(self):\n if self._geocoder is None:\n from .geometry import GridGeocoding\n\n azimuth, range_ = self.get_burst_grid()\n lats, lons, heights = self.get_lat_lon_height()\n self._geocoder = GridGeocoding(\n lats, lons, heights, xaxis=range_, yaxis=azimuth\n )\n return self._geocoder\n\n def radar_to_geodetic(self, tau, t, deg=True):\n \"\"\"Convert RADAR coordinates into geodetic coordinates.\n\n Compute the geodetic coordinates (lat, lon, h) corresponding to\n RADAR coordinates (tau, t), i.e. fast time (range time) and slow\n time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (lat, lon, h)\n\n If ``deg`` is True the output ``lat`` and ``lon`` are expressed\n in degrees, otherwise in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().forward_geocode(tau, t, deg=deg)\n\n def geodetic_to_radar(self, lat, lon, h=0, deg=True):\n \"\"\"Convert geodetic coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n geodetic coordinates (lat, lon, h)::\n\n (lat, lon, h) -> (tau, t)\n\n If ``deg`` is True it is assumed that input ``lat`` and ``lon``\n are expressed in degrees, otherwise it is assumed that angles\n are expressed in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().backward_geocode(lat, lon, h, deg=deg)\n\n def radar_to_image(self, t, tau):\n \"\"\"Convert RADAR coordinates into image coordinates.\n\n Compute the image coordinates (line, sample) corresponding\n to RADAR coordinates (tau, t), i.e. 
fast time (range time) and\n slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (line, sample)\n \"\"\"\n line = (t - self.sampling_start[\"y\"]) / self.sampling[\"y\"]\n sample = (tau - self.sampling_start[\"x\"]) / self.sampling[\"x\"]\n return line, sample\n\n def image_to_radar(self, line, sample):\n \"\"\"Convert image coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n image coordinates (line, sample)::\n\n (line, sample) -> (t, tau)\n \"\"\"\n t = self.sampling_start[\"y\"] + line * self.sampling[\"y\"]\n tau = self.sampling_start[\"x\"] + sample * self.sampling[\"x\"]\n return t, tau" } ]
from .product import Sentinel1Etad, Sentinel1EtadSwath, Sentinel1EtadBurst
13,421
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"]
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"]
formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_)
0
2023-10-27 13:47:30+00:00
16k
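The Sentinel1EtadBurst snippet in the record above converts between RADAR coordinates (tau, t) and image coordinates (line, sample) with a plain affine mapping driven by the burst's sampling_start and sampling dictionaries. Below is a minimal standalone sketch of that mapping; the sampling values are invented for illustration and are not taken from any real product.

# Minimal sketch of the affine radar/image mapping described in the
# Sentinel1EtadBurst snippet above. The sampling values are made up.
sampling_start = {"x": 3.8e-3, "y": 1.2, "units": "s"}  # first range/azimuth times [s]
sampling = {"x": 2.3e-7, "y": 8.0e-3, "units": "s"}     # range/azimuth spacing [s]

def radar_to_image(t, tau):
    """(tau, t) -> (line, sample)."""
    line = (t - sampling_start["y"]) / sampling["y"]
    sample = (tau - sampling_start["x"]) / sampling["x"]
    return line, sample

def image_to_radar(line, sample):
    """(line, sample) -> (t, tau)."""
    t = sampling_start["y"] + line * sampling["y"]
    tau = sampling_start["x"] + sample * sampling["x"]
    return t, tau

# Round trip: image -> radar -> image reproduces the pixel indices.
t, tau = image_to_radar(100, 250)
line, sample = radar_to_image(t, tau)
assert abs(line - 100) < 1e-6 and abs(sample - 250) < 1e-6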
ifrit98/storage-subnet
storage/validator/store.py
[ { "identifier": "EventSchema", "path": "storage/validator/event.py", "snippet": "class EventSchema:\n task_name: str # Task type, e.g. 'store', 'challenge', 'retrieve' 'broadcast'\n successful: List[bool] # List of whether or not the task was successful or not\n completion_times: List[float] # List of completion times for a given task\n task_status_messages: List[\n str\n ] # List of completion status messages for a given prompt\n task_status_codes: List[str] # List of completion status codes for a given prompt\n block: float # Current block at given step\n uids: List[int] # Queried uids\n step_length: float # Elapsed time between the beginning of a run step to the end of a run step\n best_uid: str # Best completion for given task\n best_hotkey: str # Best hotkey for given task\n\n # Reward data\n rewards: List[float] # Reward vector for given step\n\n # Weights data and moving averages\n set_weights: Optional[List[List[float]]] = None\n moving_averaged_scores: Optional[List[float]] = None\n\n @staticmethod\n def from_dict(event_dict: dict) -> \"EventSchema\":\n \"\"\"Converts a dictionary to an EventSchema object.\"\"\"\n\n return EventSchema(\n task_name=event_dict[\"task_name\"],\n successful=event_dict[\"successful\"],\n completion_times=event_dict[\"completion_times\"],\n task_status_messages=event_dict[\"task_status_messages\"],\n task_status_codes=event_dict[\"task_status_codes\"],\n block=event_dict[\"block\"],\n uids=event_dict[\"uids\"],\n step_length=event_dict[\"step_length\"],\n best_uid=event_dict[\"best_uid\"],\n best_hotkey=event_dict[\"best_hotkey\"],\n rewards=event_dict[\"rewards\"],\n set_weights=event_dict[\"set_weights\"],\n moving_averaged_scores=event_dict[\"moving_averaged_scores\"],\n )" }, { "identifier": "protocol", "path": "storage/protocol.py", "snippet": "class Store(bt.Synapse):\nclass StoreUser(bt.Synapse):\nclass Challenge(bt.Synapse):\nclass Retrieve(bt.Synapse):\nclass RetrieveUser(bt.Synapse):\n def __str__(self):\n def __str__(self):\n def __str__(self):" }, { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = 
ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "make_random_file", "path": "storage/validator/utils.py", "snippet": "def make_random_file(name: str = None, maxsize: int = None) -> Union[bytes, str]:\n \"\"\"\n Creates a file with random binary data or returns a bytes object with random data if no name is provided.\n\n Args:\n name (str, optional): The name of the file to create. If None, the function returns the random data instead.\n maxsize (int): The maximum size of the file or bytes object to be created, in bytes. Defaults to 1024.\n\n Returns:\n bytes: If 'name' is not provided, returns a bytes object containing random data.\n None: If 'name' is provided, a file is created and returns the filepath stored.\n\n Raises:\n OSError: If the function encounters an error while writing to the file.\n \"\"\"\n size = (\n random.randint(random.randint(24, 128), maxsize)\n if maxsize != None\n else generate_file_size_with_lognormal()\n )\n data = os.urandom(size)\n if isinstance(name, str):\n with open(name, \"wb\") as fout:\n fout.write(data)\n return name # Return filepath of saved data\n else:\n return data # Return the data itself" }, { "identifier": "compute_chunk_distribution_mut_exclusive_numpy_reuse_uids", "path": "storage/validator/utils.py", "snippet": "async def compute_chunk_distribution_mut_exclusive_numpy_reuse_uids(\n self, data_size, R, k, chunk_size=None, exclude=None\n):\n \"\"\"\n Asynchronously computes a distribution of data chunks across a set of unique identifiers (UIDs),\n taking into account redundancy and chunk size optimization. 
This function is useful for distributing\n data across a network of nodes or miners in a way that ensures redundancy and optimal utilization.\n\n Parameters:\n self: Reference to the class instance from which this method is called.\n data_size (int): The total size of the data to be distributed, in bytes.\n R (int): Redundancy factor, denoting the number of times each chunk should be replicated.\n k (int): The number of unique identifiers (UIDs) to be involved in the distribution.\n chunk_size (int, optional): The size of each data chunk. If not provided, an optimal chunk size\n is calculated based on the data size and the number of UIDs.\n\n Yields:\n dict: A dictionary representing a chunk's metadata, including its size, start index, end index,\n the UIDs assigned to it, and its index in the chunk sequence.\n\n Raises:\n ValueError: If the redundancy factor R is greater than the number of available UIDs.\n\n Note:\n - This function is designed to be used in distributed storage or processing systems where\n data needs to be split and stored across multiple nodes with redundancy.\n - It evenly divides the data into chunks and assigns UIDs to each chunk while ensuring that\n the redundancy requirements are met.\n \"\"\"\n\n available_uids = await get_available_query_miners(self, k=k, exclude=exclude)\n chunk_size = chunk_size or optimal_chunk_size(data_size, len(available_uids), R)\n available_uids = adjust_uids_to_multiple(available_uids, R)\n chunk_indices = calculate_chunk_indices(data_size, chunk_size)\n\n if R > len(available_uids):\n raise ValueError(\n \"Redundancy factor cannot be greater than the number of available UIDs.\"\n )\n\n # Create initial UID groups\n initial_uid_groups = partition_uids(available_uids, R)\n uid_groups = list(initial_uid_groups)\n\n # If more groups are needed, start reusing UIDs\n total_chunks_needed = data_size // chunk_size\n while len(uid_groups) < total_chunks_needed:\n for group in cycle(initial_uid_groups):\n if len(uid_groups) >= total_chunks_needed:\n break\n uid_groups.append(group)\n\n for i, ((start, end), uid_group) in enumerate(zip(chunk_indices, uid_groups)):\n yield {\n \"chunk_size\": chunk_size,\n \"start_idx\": start,\n \"end_idx\": end,\n \"uids\": uid_group,\n \"chunk_index\": i,\n }" }, { "identifier": "encrypt_data", "path": "storage/validator/encryption.py", "snippet": "NACL_SALT = b\"\\x13q\\x83\\xdf\\xf1Z\\t\\xbc\\x9c\\x90\\xb5Q\\x879\\xe9\\xb1\"\ndef encrypt_aes(filename: typing.Union[bytes, str], key: bytes) -> bytes:\ndef decrypt_aes(cipher_text: bytes, key: bytes, nonce: bytes, tag: bytes) -> bytes:\ndef encrypt_data_with_wallet(data: bytes, wallet) -> bytes:\ndef decrypt_data_with_coldkey_private_key(\n encrypted_data: bytes, private_key: typing.Union[str, bytes]\n) -> bytes:\ndef decrypt_data_with_wallet(encrypted_data: bytes, wallet) -> bytes:\ndef encrypt_data_with_aes_and_serialize(\n data: bytes, wallet: bt.wallet\n) -> typing.Tuple[bytes, bytes]:\ndef decrypt_data_and_deserialize(\n encrypted_data: bytes, encryption_payload: bytes, wallet: bt.wallet\n) -> bytes:\ndef decrypt_data_and_deserialize_with_coldkey_private_key(\n encrypted_data: bytes,\n encryption_payload: bytes,\n private_key: typing.Union[str, bytes],\n) -> bytes:\ndef serialize_nacl_encrypted_message(encrypted_message: EncryptedMessage) -> str:\ndef deserialize_nacl_encrypted_message(serialized_data: str) -> EncryptedMessage:\ndef setup_encryption_wallet(\n wallet_name=\"encryption\",\n wallet_hotkey=\"encryption\",\n password=\"dummy_password\",\n 
n_words=12,\n use_encryption=False,\n overwrite=False,\n):" }, { "identifier": "verify_store_with_seed", "path": "storage/validator/verify.py", "snippet": "def verify_store_with_seed(synapse, b64_encrypted_data, seed, verbose=False):\n \"\"\"\n Verifies the storing process in a decentralized network using the provided synapse and seed.\n This function decodes the data, reconstructs the hash using the seed, and verifies it against the commitment hash.\n It also opens the commitment to validate the process.\n Args:\n synapse (Synapse): The synapse object containing store process details.\n verbose (bool, optional): Enables verbose logging for debugging. Defaults to False.\n Returns:\n bool: True if the storing process is verified successfully, False otherwise.\n \"\"\"\n try:\n encrypted_data = base64.b64decode(b64_encrypted_data)\n except Exception as e:\n bt.logging.error(f\"Could not decode store data with error: {e}\")\n return False\n\n seed_value = str(seed).encode()\n reconstructed_hash = hash_data(encrypted_data + seed_value)\n\n # e.g. send synapse.commitment_hash as an int for consistency\n if synapse.commitment_hash != str(reconstructed_hash):\n if verbose:\n bt.logging.error(f\"Initial commitment hash != hash(data + seed)\")\n bt.logging.error(f\"commitment hash : {synapse.commitment_hash}\")\n bt.logging.error(f\"reconstructed hash: {reconstructed_hash}\")\n bt.logging.error(f\"synapse : {synapse.dendrite.dict()}\")\n return False\n\n committer = ECCommitment(\n hex_to_ecc_point(synapse.g, synapse.curve),\n hex_to_ecc_point(synapse.h, synapse.curve),\n )\n commitment = hex_to_ecc_point(synapse.commitment, synapse.curve)\n\n if not committer.open(\n commitment,\n hash_data(encrypted_data + str(seed).encode()),\n synapse.randomness,\n ):\n bt.logging.error(f\"Opening commitment failed\")\n bt.logging.error(f\"synapse: {synapse.dendrite.dict()}\")\n return False\n\n return True" }, { "identifier": "apply_reward_scores", "path": "storage/validator/reward.py", "snippet": "def apply_reward_scores(\n self, uids, responses, rewards, timeout: float, mode: str = \"sigmoid\"\n):\n \"\"\"\n Adjusts the moving average scores for a set of UIDs based on their response times and reward values.\n\n This should reflect the distribution of axon response times (minmax norm)\n\n Parameters:\n uids (List[int]): A list of UIDs for which rewards are being applied.\n responses (List[Response]): A list of response objects received from the nodes.\n rewards (torch.FloatTensor): A tensor containing the computed reward values.\n \"\"\"\n if mode not in [\"sigmoid\", \"minmax\"]:\n raise ValueError(f\"Invalid mode: {mode}\")\n\n if self.config.neuron.verbose:\n bt.logging.debug(f\"Applying rewards: {rewards}\")\n bt.logging.debug(f\"Reward shape: {rewards.shape}\")\n bt.logging.debug(f\"UIDs: {uids}\")\n\n scaled_rewards = scale_rewards(uids, responses, rewards, timeout=timeout, mode=mode)\n bt.logging.debug(f\"apply_reward_scores() Scaled rewards: {scaled_rewards}\")\n\n # Compute forward pass rewards\n # shape: [ metagraph.n ]\n scattered_rewards: torch.FloatTensor = self.moving_averaged_scores.scatter(\n 0, torch.tensor(uids).to(self.device), scaled_rewards\n ).to(self.device)\n bt.logging.trace(f\"Scattered rewards: {scattered_rewards}\")\n\n # Update moving_averaged_scores with rewards produced by this step.\n # shape: [ metagraph.n ]\n alpha: float = self.config.neuron.moving_average_alpha\n self.moving_averaged_scores: torch.FloatTensor = alpha * scattered_rewards + (\n 1 - alpha\n ) * 
self.moving_averaged_scores.to(self.device)\n bt.logging.trace(f\"Updated moving avg scores: {self.moving_averaged_scores}\")" }, { "identifier": "add_metadata_to_hotkey", "path": "storage/validator/database.py", "snippet": "async def add_metadata_to_hotkey(\n ss58_address: str, data_hash: str, metadata: Dict, database: aioredis.Redis\n):\n \"\"\"\n Associates a data hash and its metadata with a hotkey in Redis.\n\n Parameters:\n ss58_address (str): The primary key representing the hotkey.\n data_hash (str): The subkey representing the data hash.\n metadata (dict): The metadata to associate with the data hash. Includes the size of the data, the seed,\n and the encryption payload. E.g. {'size': 123, 'seed': 456, 'encryption_payload': 'abc'}.\n database (aioredis.Redis): The Redis client instance.\n \"\"\"\n # Serialize the metadata as a JSON string\n metadata_json = json.dumps(metadata)\n # Use HSET to associate the data hash with the hotkey\n key = f\"hotkey:{ss58_address}\"\n await database.hset(key, data_hash, metadata_json)\n bt.logging.trace(f\"Associated data hash {data_hash} with hotkey {ss58_address}.\")" }, { "identifier": "store_chunk_metadata", "path": "storage/validator/database.py", "snippet": "async def store_chunk_metadata(\n full_hash: str,\n chunk_hash: str,\n hotkeys: List[str],\n chunk_size: int,\n database: aioredis.Redis,\n):\n \"\"\"\n Store metadata for a specific file chunk.\n\n This function creates or updates the metadata for a chunk, including the associated hotkeys and chunk size.\n\n Parameters:\n - full_hash (str): The full hash of the file that the chunk belongs to.\n - chunk_hash (str): The hash of the chunk whose metadata is to be stored.\n - hotkeys (List[str]): A list of hotkeys associated with the chunk.\n - chunk_size (int): The size of the chunk in bytes.\n - database (aioredis.Redis): An instance of the Redis database.\n \"\"\"\n chunk_metadata_key = f\"chunk:{chunk_hash}\"\n existing_metadata = await database.hget(chunk_metadata_key, \"hotkeys\")\n if existing_metadata:\n existing_hotkeys = existing_metadata.decode().split(\",\")\n hotkeys = set(existing_hotkeys + hotkeys)\n metadata = {\"hotkeys\": \",\".join(hotkeys), \"size\": chunk_size}\n\n await database.hmset(chunk_metadata_key, metadata)" }, { "identifier": "store_file_chunk_mapping_ordered", "path": "storage/validator/database.py", "snippet": "async def store_file_chunk_mapping_ordered(\n full_hash: str,\n chunk_hashes: List[str],\n chunk_indices: List[str],\n database: aioredis.Redis,\n encryption_payload: Optional[Union[bytes, dict]] = None,\n):\n \"\"\"\n Store an ordered mapping of file chunks in the database.\n\n This function takes a file's full hash and the hashes of its individual chunks, along with their\n respective indices, and stores them in a sorted set in the Redis database. 
The order is preserved\n based on the chunk index.\n\n Parameters:\n - full_hash (str): The full hash of the file.\n - chunk_hashes (List[str]): A list of hashes for the individual chunks of the file.\n - chunk_indices (List[int]): A list of indices corresponding to each chunk hash.\n - database (aioredis.Redis): An instance of the Redis database.\n - encryption_payload (Optional[Union[bytes, dict]]): The encryption payload to store with the file.\n \"\"\"\n key = f\"file:{full_hash}\"\n for chunk_index, chunk_hash in zip(chunk_indices, chunk_hashes):\n await database.zadd(key, {chunk_hash: chunk_index})\n\n # Store the encryption payload if provided\n if encryption_payload:\n if isinstance(encryption_payload, dict):\n encryption_payload = json.dumps(encryption_payload)\n await database.set(f\"payload:{full_hash}\", encryption_payload)" }, { "identifier": "get_ordered_metadata", "path": "storage/validator/database.py", "snippet": "async def get_ordered_metadata(\n file_hash: str, database: aioredis.Redis\n) -> List[Dict[str, Union[str, List[str], int]]]:\n \"\"\"\n Retrieve the metadata for all chunks of a file in the order of their indices.\n\n This function calls `get_all_chunks_for_file` to fetch all chunks' metadata and then sorts\n them based on their indices to maintain the original file order.\n\n Parameters:\n - file_hash (str): The full hash of the file whose ordered metadata is to be retrieved.\n - database (aioredis.Redis): An instance of the Redis database.\n\n Returns:\n - List[dict]: A list of metadata dictionaries for each chunk, ordered by their chunk index.\n Returns None if no chunks are found.\n \"\"\"\n chunks_info = await get_all_chunks_for_file(file_hash, database)\n if chunks_info is None:\n return None\n\n ordered_chunks = sorted(chunks_info.items(), key=lambda x: x[0])\n return [chunk_info for _, chunk_info in ordered_chunks]" }, { "identifier": "hotkey_at_capacity", "path": "storage/validator/database.py", "snippet": "async def hotkey_at_capacity(\n hotkey: str, database: aioredis.Redis, verbose: bool = False\n) -> bool:\n \"\"\"\n Checks if the hotkey is at capacity.\n\n Parameters:\n database (aioredis.Redis): The Redis client instance.\n hotkey (str): The key representing the hotkey.\n\n Returns:\n True if the hotkey is at capacity, False otherwise.\n \"\"\"\n # Get the total storage used by the hotkey\n total_storage = await total_hotkey_storage(hotkey, database, verbose)\n # Check if the hotkey is at capacity\n byte_limit = await database.hget(f\"stats:{hotkey}\", \"storage_limit\")\n if byte_limit is None:\n if verbose:\n bt.logging.trace(f\"Could not find storage limit for {hotkey}.\")\n return False\n try:\n limit = int(byte_limit)\n except Exception as e:\n if verbose:\n bt.logging.trace(f\"Could not parse storage limit for {hotkey} | {e}.\")\n return False\n if total_storage >= limit:\n if verbose:\n bt.logging.trace(\n f\"Hotkey {hotkey} is at max capacity {limit // 1024**3} GB.\"\n )\n return True\n else:\n if verbose:\n bt.logging.trace(\n f\"Hotkey {hotkey} has {(limit - total_storage) // 1024**3} GB free.\"\n )\n return False" }, { "identifier": "update_statistics", "path": "storage/validator/bonding.py", "snippet": "async def update_statistics(\n ss58_address: str, success: bool, task_type: str, database: aioredis.Redis\n):\n \"\"\"\n Updates the statistics of a miner in the decentralized storage system.\n If the miner is not already registered, they are registered first. 
This function updates\n the miner's statistics based on the task performed (store, challenge, retrieve) and whether\n it was successful.\n Args:\n ss58_address (str): The unique address (hotkey) of the miner.\n success (bool): Indicates whether the task was successful or not.\n task_type (str): The type of task performed ('store', 'challenge', 'retrieve').\n database (redis.Redis): The Redis client instance for database operations.\n \"\"\"\n # Check and see if this miner is registered.\n if not await miner_is_registered(ss58_address, database):\n bt.logging.debug(f\"Registering new miner {ss58_address}...\")\n await register_miner(ss58_address, database)\n\n # Update statistics in the stats hash\n stats_key = f\"stats:{ss58_address}\"\n\n if task_type in [\"store\", \"challenge\", \"retrieve\"]:\n await database.hincrby(stats_key, f\"{task_type}_attempts\", 1)\n if success:\n await database.hincrby(stats_key, f\"{task_type}_successes\", 1)\n\n # Transition retireval -> retrieve successes (legacy)\n legacy_retrieve_successes = await database.hget(stats_key, \"retrieval_successes\")\n if legacy_retrieve_successes != None:\n await database.hset(\n stats_key, \"retrieve_successes\", int(legacy_retrieve_successes)\n )\n await database.hdel(stats_key, \"retrieval_successes\")\n\n # Transition retireval -> retrieve attempts (legacy)\n legacy_retrieve_attempts = await database.hget(stats_key, \"retrieval_attempts\")\n if legacy_retrieve_attempts != None:\n await database.hset(\n stats_key, \"retrieve_attempts\", int(legacy_retrieve_attempts)\n )\n await database.hdel(stats_key, \"retrieval_attempts\")\n\n # Update the total successes that we rollover every epoch\n if await database.hget(stats_key, \"total_successes\") == None:\n store_successes = int(await database.hget(stats_key, \"store_successes\"))\n challenge_successes = int(await database.hget(stats_key, \"challenge_successes\"))\n retrieval_successes = int(await database.hget(stats_key, \"retrieve_successes\"))\n total_successes = store_successes + retrieval_successes + challenge_successes\n await database.hset(stats_key, \"total_successes\", total_successes)\n if success:\n await database.hincrby(stats_key, \"total_successes\", 1)" }, { "identifier": "create_reward_vector", "path": "storage/validator/reward.py", "snippet": "async def create_reward_vector(\n self,\n synapse: Union[Store, Retrieve, Challenge],\n rewards: torch.FloatTensor,\n uids: List[int],\n responses: List[Synapse],\n event: EventSchema,\n callback: callable,\n fail_callback: callable,\n):\n # Determine if the commitment is valid\n success = False\n if isinstance(synapse, Store):\n verify_fn = partial(\n verify_store_with_seed,\n b64_encrypted_data=synapse.encrypted_data,\n seed=synapse.seed,\n )\n task_type = \"store\"\n failure_reward = STORE_FAILURE_REWARD\n elif isinstance(synapse, Retrieve):\n verify_fn = partial(verify_retrieve_with_seed, seed=synapse.seed)\n task_type = \"retrieve\"\n failure_reward = RETRIEVAL_FAILURE_REWARD\n elif isinstance(synapse, Challenge):\n verify_fn = partial(verify_challenge_with_seed, seed=synapse.seed)\n task_type = \"challenge\"\n failure_reward = CHALLENGE_FAILURE_REWARD\n else:\n raise ValueError(f\"Invalid synapse type: {type(synapse)}\")\n\n for idx, (uid, response) in enumerate(zip(uids, responses)):\n # Verify the commitment\n hotkey = self.metagraph.hotkeys[uid]\n\n # Determine if the commitment is valid\n success = verify_fn(synapse=response)\n if success:\n bt.logging.debug(\n f\"Successfully verified {synapse.__class__} 
commitment from UID: {uid} | hotkey: {hotkey}\"\n )\n await callback(hotkey, idx, uid, response)\n else:\n bt.logging.error(\n f\"create_reward_vector() Failed to verify store commitment from UID: {uid} | hotkey: {hotkey}\"\n )\n fail_callback(uid)\n\n # Update the storage statistics\n await update_statistics(\n ss58_address=hotkey,\n success=success,\n task_type=task_type,\n database=self.database,\n )\n\n # Apply reward for this task\n tier_factor = await get_tier_factor(hotkey, self.database)\n rewards[idx] = 1.0 * tier_factor if success else failure_reward * tier_factor\n\n event.successful.append(success)\n event.uids.append(uid)\n event.completion_times.append(response.dendrite.process_time)\n event.task_status_messages.append(response.dendrite.status_message)\n event.task_status_codes.append(response.dendrite.status_code)" }, { "identifier": "ping_and_retry_uids", "path": "storage/validator/network.py", "snippet": "async def ping_and_retry_uids(\n self, k: int = None, max_retries: int = 3, exclude_uids: typing.List[int] = []\n):\n \"\"\"\n Fetch available uids to minimize waiting for timeouts if they're going to fail anyways...\n \"\"\"\n # Select initial subset of miners to query\n uids = await get_available_query_miners(\n self, k=k or self.config.neuron.store_redundancy, exclude=exclude_uids\n )\n bt.logging.debug(\"initial ping_and_retry() uids:\", uids)\n\n retries = 0\n successful_uids = set()\n failed_uids = set()\n while len(successful_uids) < k and retries < max_retries:\n # Ping all UIDs\n current_successful_uids, current_failed_uids = await ping_uids(self, uids)\n successful_uids.update(current_successful_uids)\n failed_uids.update(current_failed_uids)\n\n # If enough UIDs are successful, select the first k items\n if len(successful_uids) >= k:\n uids = list(successful_uids)[:k]\n break\n\n # Reroll for k UIDs excluding the successful ones\n new_uids = await get_available_query_miners(\n self, k=k, exclude=list(successful_uids.union(failed_uids))\n )\n bt.logging.debug(f\"ping_and_retry() new uids: {new_uids}\")\n retries += 1\n\n # Log if the maximum retries are reached without enough successful UIDs\n if len(successful_uids) < k:\n bt.logging.warning(\n f\"Insufficient successful UIDs for k: {k} Success UIDs {successful_uids} Failed UIDs: {failed_uids}\"\n )\n\n return list(successful_uids)[:k], failed_uids" }, { "identifier": "compute_and_ping_chunks", "path": "storage/validator/network.py", "snippet": "async def compute_and_ping_chunks(self, distributions):\n \"\"\"\n Asynchronously evaluates the availability of miners for the given chunk distributions by pinging them.\n Rerolls the distribution to replace failed miners, ensuring exactly k successful miners are selected.\n\n Parameters:\n distributions (list of dicts): A list of chunk distribution dictionaries, each containing\n information about chunk indices and assigned miner UIDs.\n\n Returns:\n list of dicts: The updated list of chunk distributions with exactly k successful miner UIDs.\n\n Note:\n - This function is crucial for ensuring that data chunks are assigned to available and responsive miners.\n - Pings miners based on their UIDs and updates the distributions accordingly.\n - Logs the new set of UIDs and distributions for traceability.\n \"\"\"\n max_retries = 3 # Define the maximum number of retries\n target_number_of_uids = len(\n distributions[0][\"uids\"]\n ) # Assuming k is the length of the uids in the first distribution\n\n for dist in distributions:\n retries = 0\n successful_uids = set()\n\n 
while len(successful_uids) < target_number_of_uids and retries < max_retries:\n # Ping all UIDs\n current_successful_uids, _ = await ping_uids(self, dist[\"uids\"])\n successful_uids.update(current_successful_uids)\n\n # If enough UIDs are successful, select the first k items\n if len(successful_uids) >= target_number_of_uids:\n dist[\"uids\"] = tuple(sorted(successful_uids)[:target_number_of_uids])\n break\n\n # Reroll for k UIDs excluding the successful ones\n new_uids = await get_available_query_miners(\n self, k=target_number_of_uids, exclude=successful_uids\n )\n bt.logging.trace(\"compute_and_ping_chunks() new uids:\", new_uids)\n\n # Update the distribution with new UIDs\n dist[\"uids\"] = tuple(new_uids)\n retries += 1\n\n # Log if the maximum retries are reached without enough successful UIDs\n if len(successful_uids) < target_number_of_uids:\n bt.logging.warning(\n f\"compute_and_ping_chunks(): Insufficient successful UIDs for distribution: {dist}\"\n )\n\n # Continue with your logic using the updated distributions\n bt.logging.trace(\"new distributions:\", distributions)\n return distributions" }, { "identifier": "reroll_distribution", "path": "storage/validator/network.py", "snippet": "async def reroll_distribution(self, distribution, failed_uids):\n \"\"\"\n Asynchronously rerolls a single data chunk distribution by replacing failed miner UIDs with new, available ones.\n This is part of the error handling process in data distribution to ensure that each chunk is reliably stored.\n\n Parameters:\n distribution (dict): The original chunk distribution dictionary, containing chunk information and miner UIDs.\n failed_uids (list of int): List of UIDs that failed in the original distribution and need replacement.\n\n Returns:\n dict: The updated chunk distribution with new miner UIDs replacing the failed ones.\n\n Note:\n - This function is typically used when certain miners are unresponsive or unable to store the chunk.\n - Ensures that each chunk has the required number of active miners for redundancy.\n \"\"\"\n # Get new UIDs to replace the failed ones\n new_uids = await get_available_query_miners(\n self, k=len(failed_uids), exclude=failed_uids\n )\n distribution[\"uids\"] = new_uids\n return distribution" }, { "identifier": "compute_chunk_distribution_mut_exclusive_numpy_reuse_uids", "path": "storage/validator/utils.py", "snippet": "async def compute_chunk_distribution_mut_exclusive_numpy_reuse_uids(\n self, data_size, R, k, chunk_size=None, exclude=None\n):\n \"\"\"\n Asynchronously computes a distribution of data chunks across a set of unique identifiers (UIDs),\n taking into account redundancy and chunk size optimization. This function is useful for distributing\n data across a network of nodes or miners in a way that ensures redundancy and optimal utilization.\n\n Parameters:\n self: Reference to the class instance from which this method is called.\n data_size (int): The total size of the data to be distributed, in bytes.\n R (int): Redundancy factor, denoting the number of times each chunk should be replicated.\n k (int): The number of unique identifiers (UIDs) to be involved in the distribution.\n chunk_size (int, optional): The size of each data chunk. 
If not provided, an optimal chunk size\n is calculated based on the data size and the number of UIDs.\n\n Yields:\n dict: A dictionary representing a chunk's metadata, including its size, start index, end index,\n the UIDs assigned to it, and its index in the chunk sequence.\n\n Raises:\n ValueError: If the redundancy factor R is greater than the number of available UIDs.\n\n Note:\n - This function is designed to be used in distributed storage or processing systems where\n data needs to be split and stored across multiple nodes with redundancy.\n - It evenly divides the data into chunks and assigns UIDs to each chunk while ensuring that\n the redundancy requirements are met.\n \"\"\"\n\n available_uids = await get_available_query_miners(self, k=k, exclude=exclude)\n chunk_size = chunk_size or optimal_chunk_size(data_size, len(available_uids), R)\n available_uids = adjust_uids_to_multiple(available_uids, R)\n chunk_indices = calculate_chunk_indices(data_size, chunk_size)\n\n if R > len(available_uids):\n raise ValueError(\n \"Redundancy factor cannot be greater than the number of available UIDs.\"\n )\n\n # Create initial UID groups\n initial_uid_groups = partition_uids(available_uids, R)\n uid_groups = list(initial_uid_groups)\n\n # If more groups are needed, start reusing UIDs\n total_chunks_needed = data_size // chunk_size\n while len(uid_groups) < total_chunks_needed:\n for group in cycle(initial_uid_groups):\n if len(uid_groups) >= total_chunks_needed:\n break\n uid_groups.append(group)\n\n for i, ((start, end), uid_group) in enumerate(zip(chunk_indices, uid_groups)):\n yield {\n \"chunk_size\": chunk_size,\n \"start_idx\": start,\n \"end_idx\": end,\n \"uids\": uid_group,\n \"chunk_index\": i,\n }" } ]
import os
import sys
import copy
import time
import torch
import base64
import typing
import asyncio
import aioredis
import bittensor as bt
import websocket
from pprint import pformat
from pyinstrument import Profiler
from Crypto.Random import get_random_bytes, random
from dataclasses import asdict
from storage.validator.event import EventSchema
from storage import protocol
from storage.shared.ecc import (
    hash_data,
    setup_CRS,
    ecc_point_to_hex,
)
from storage.shared.utils import b64_encode
from storage.validator.utils import (
    make_random_file,
    compute_chunk_distribution_mut_exclusive_numpy_reuse_uids,
)
from storage.validator.encryption import encrypt_data
from storage.validator.verify import verify_store_with_seed
from storage.validator.reward import apply_reward_scores
from storage.validator.database import (
    add_metadata_to_hotkey,
    store_chunk_metadata,
    store_file_chunk_mapping_ordered,
    get_ordered_metadata,
    hotkey_at_capacity,
)
from storage.validator.bonding import update_statistics
from .reward import create_reward_vector
from .network import ping_and_retry_uids, compute_and_ping_chunks, reroll_distribution
from .utils import compute_chunk_distribution_mut_exclusive_numpy_reuse_uids
10,992
# Update event log with moving averaged scores event.moving_averaged_scores = self.moving_averaged_scores.tolist() return event async def store_random_data(self): """ Stores data on the network and ensures it is correctly committed by the miners. Parameters: - data (bytes, optional): The data to be stored. - wallet (bt.wallet, optional): The wallet to be used for encrypting the data. Returns: - The status of the data storage operation. """ # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Make a random bytes file to test the miner if none provided data = make_random_file(maxsize=self.config.neuron.maxsize) bt.logging.debug(f"Random store data size: {sys.getsizeof(data)}") # Encrypt the data # TODO: create and use a throwaway wallet (never decrypable) encrypted_data, encryption_payload = encrypt_data(data, self.encryption_wallet) return await store_encrypted_data( self, encrypted_data, encryption_payload, k=self.config.neuron.store_sample_size, ttl=self.config.neuron.data_ttl, ) async def store_broadband( self, encrypted_data, encryption_payload, R=3, k=10, data_hash=None, exclude_uids=None, ): """ Asynchronously stores encrypted data across a distributed network by splitting it into chunks and assigning these chunks to various miners for storage. This method ensures redundancy and efficient data distribution while handling network requests concurrently. The process includes chunking the data, selecting miners for storage, and verifying the integrity of stored data through response validation. Parameters: encrypted_data (bytes): The encrypted data to be stored across the network. encryption_payload (dict): Additional payload information required for encryption. R (int, optional): The redundancy factor, denoting how many times each chunk is replicated. Default is 3. k (int, optional): The number of miners to query for each chunk. Default is 10. data_hash (str, optional): The hash of the data to be stored. If not provided, compute it. Default is None. exclude_uids: (list of int, optional): A list of UIDs to exclude from the storage process. Default is None. Returns: str: The hash of the full data, representing its unique identifier in the network. Raises: Exception: If the process of creating initial distributions fails after multiple retries. Note: - Uses a semaphore to limit the number of concurrent network requests. - Employs a retry mechanism for handling network and miner availability issues. - Logs various stages of the process for debugging and monitoring purposes. 
""" if self.config.neuron.profile: # Create a profiler instance profiler = Profiler() profiler.start() semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size) async def store_chunk_group(chunk_hash, chunk, uids): event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. async def store_encrypted_data( self, encrypted_data: typing.Union[bytes, str], encryption_payload: dict, exclude_uids: typing.List[str] = [], ttl: int = 0, k: int = None, max_retries: int = 3, ) -> bool: event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) start_time = time.time() encrypted_data = ( encrypted_data.encode("utf-8") if isinstance(encrypted_data, str) else encrypted_data ) # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Hash the data data_hash = hash_data(encrypted_data) # Convert to base64 for compactness # TODO: Don't do this if it's already b64 encoded. (Check first) b64_encrypted_data = base64.b64encode(encrypted_data).decode("utf-8") if self.config.neuron.verbose: bt.logging.debug(f"storing user data: {encrypted_data[:12]}...") bt.logging.debug(f"storing user hash: {data_hash}") bt.logging.debug(f"b64 encrypted data: {b64_encrypted_data[:12]}...") synapse = protocol.Store( encrypted_data=b64_encrypted_data, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=get_random_bytes(32).hex(), # 256-bit seed ) # Select subset of miners to query (e.g. redunancy factor of N) uids, _ = await ping_and_retry_uids( self, k=k or self.config.neuron.store_redundancy, max_retries=max_retries, exclude_uids=exclude_uids, ) bt.logging.debug(f"store_encrypted_data() uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [None] retries = 0 while len(failed_uids) and retries < max_retries: if failed_uids == [None]: # initial loop failed_uids = [] # Broadcast the query to selected miners on the network. responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. 
rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): # Prepare storage for the data for particular miner response_storage = { "prev_seed": synapse.seed, "size": sys.getsizeof(encrypted_data), # in bytes, not len(data) "encryption_payload": encryption_payload, } bt.logging.trace(f"Storing UID {uid} data {pformat(response_storage)}") # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( hotkey, data_hash, response_storage, self.database, ) if ttl > 0: await self.database.expire( f"{hotkey}:{data_hash}", ttl, ) bt.logging.debug( f"Stored data in database with hotkey: {hotkey} | uid {uid} | {data_hash}" ) def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) if self.config.neuron.verbose and self.config.neuron.log_responses: bt.logging.debug(f"Store responses round: {retries}") [ bt.logging.debug(f"Store response: {response.dendrite.dict()}") for response in responses ] bt.logging.trace(f"Applying store rewards for retry: {retries}") apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) # Get a new set of UIDs to query for those left behind if failed_uids != []: bt.logging.trace(f"Failed to store on uids: {failed_uids}") uids, _ = await ping_and_retry_uids( self, k=len(failed_uids), exclude_uids=exclude_uids ) bt.logging.trace(f"Retrying with new uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [] # reset failed uids for next round retries += 1 # Calculate step length end_time = time.time() event.step_length = end_time - start_time # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] # Update event log with moving averaged scores event.moving_averaged_scores = self.moving_averaged_scores.tolist() return event async def store_random_data(self): """ Stores data on the network and ensures it is correctly committed by the miners. Parameters: - data (bytes, optional): The data to be stored. - wallet (bt.wallet, optional): The wallet to be used for encrypting the data. Returns: - The status of the data storage operation. """ # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Make a random bytes file to test the miner if none provided data = make_random_file(maxsize=self.config.neuron.maxsize) bt.logging.debug(f"Random store data size: {sys.getsizeof(data)}") # Encrypt the data # TODO: create and use a throwaway wallet (never decrypable) encrypted_data, encryption_payload = encrypt_data(data, self.encryption_wallet) return await store_encrypted_data( self, encrypted_data, encryption_payload, k=self.config.neuron.store_sample_size, ttl=self.config.neuron.data_ttl, ) async def store_broadband( self, encrypted_data, encryption_payload, R=3, k=10, data_hash=None, exclude_uids=None, ): """ Asynchronously stores encrypted data across a distributed network by splitting it into chunks and assigning these chunks to various miners for storage. This method ensures redundancy and efficient data distribution while handling network requests concurrently. 
The process includes chunking the data, selecting miners for storage, and verifying the integrity of stored data through response validation. Parameters: encrypted_data (bytes): The encrypted data to be stored across the network. encryption_payload (dict): Additional payload information required for encryption. R (int, optional): The redundancy factor, denoting how many times each chunk is replicated. Default is 3. k (int, optional): The number of miners to query for each chunk. Default is 10. data_hash (str, optional): The hash of the data to be stored. If not provided, compute it. Default is None. exclude_uids: (list of int, optional): A list of UIDs to exclude from the storage process. Default is None. Returns: str: The hash of the full data, representing its unique identifier in the network. Raises: Exception: If the process of creating initial distributions fails after multiple retries. Note: - Uses a semaphore to limit the number of concurrent network requests. - Employs a retry mechanism for handling network and miner availability issues. - Logs various stages of the process for debugging and monitoring purposes. """ if self.config.neuron.profile: # Create a profiler instance profiler = Profiler() profiler.start() semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size) async def store_chunk_group(chunk_hash, chunk, uids): event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids
if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database)
15
2023-10-26 18:54:47+00:00
16k
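The `store_broadband` routine captured in the record above splits encrypted data into chunks and stores each chunk on several miners, bounding concurrent network requests with an `asyncio.Semaphore` and re-queuing failed UIDs. A minimal, self-contained sketch of that semaphore-bounded, redundant fan-out pattern follows; `store_on_miner` is a hypothetical stand-in for the real Store-synapse dendrite call, and the round-robin miner assignment is an illustrative simplification rather than the validator's actual selection logic.

```python
import asyncio
from typing import Awaitable, Callable, Dict, List, Sequence


async def distribute_chunks(
    chunks: Dict[str, bytes],                                  # chunk_hash -> chunk bytes
    miner_uids: Sequence[int],
    store_on_miner: Callable[[int, bytes], Awaitable[bool]],   # hypothetical stand-in
    redundancy: int = 3,                                       # R: miners per chunk
    max_concurrency: int = 10,                                 # mirrors neuron.semaphore_size
) -> Dict[str, List[int]]:
    """Fan each chunk out to `redundancy` miners, capping in-flight requests."""
    semaphore = asyncio.Semaphore(max_concurrency)
    placements: Dict[str, List[int]] = {h: [] for h in chunks}

    async def store_one(chunk_hash: str, chunk: bytes, uid: int) -> None:
        async with semaphore:                       # bound concurrent network calls
            ok = await store_on_miner(uid, chunk)   # assumed to return True on success
        if ok:
            placements[chunk_hash].append(uid)

    tasks = []
    for i, (chunk_hash, chunk) in enumerate(chunks.items()):
        # naive round-robin: give each chunk `redundancy` distinct miners
        assigned = [miner_uids[(i + r) % len(miner_uids)] for r in range(redundancy)]
        tasks += [store_one(chunk_hash, chunk, uid) for uid in assigned]

    await asyncio.gather(*tasks)
    return placements
```

Chunks that end up with fewer than `redundancy` placements would then be retried against a fresh set of UIDs, which is the role the `failed_uids` / `ping_and_retry_uids` loop plays in the record's code.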
Eclectic-Sheep/sheeprlhf
sheeprlhf/task/train/ppo.py
[ { "identifier": "PPOAgent", "path": "sheeprlhf/agent/ppo.py", "snippet": "class PPOAgent:\n \"\"\"Agent model for PPO training.\"\"\"\n\n _reference: ActorModel\n _reward: RewardModel\n _finetune_mode: FINETUNE_MODE\n _actor: Optional[ActorModel] = None\n _critic: Optional[CriticModel] = None\n _same_actor_critic: bool = False\n _share_actor_critic: bool = False\n _share_critic_reward: bool = False\n\n _sft_checkpoint_path: str\n _sft_model_cfg: ModelConfig\n _rm_checkpoint_path: str\n _rm_model_cfg: ModelConfig\n\n _lora_enabled: bool\n _init_critic_with_reward: bool\n\n def __init__(self, model_cfg: ModelConfig, task_cfg: PPOConfig) -> None:\n self.model_cfg = model_cfg\n self._init_critic_with_reward = task_cfg.init_critic_with_reward\n\n self._sft_model_cfg, self._sft_checkpoint_path = get_model_checkpoint(\n task_cfg.sft_experiment_dir, task_cfg.sft_model_name\n )\n sft_model_name = self._sft_model_cfg.repo_name\n\n self._rm_model_cfg, self._rm_checkpoint_path = get_model_checkpoint(\n task_cfg.rm_experiment_dir, task_cfg.sft_model_name\n )\n rm_model_name = self._rm_model_cfg.repo_name\n\n self._reference = ActorModel(model_cfg=self._sft_model_cfg)\n self._reward = RewardModel(model_cfg=self._rm_model_cfg)\n\n self._same_actor_critic = sft_model_name == rm_model_name\n self._finetune_mode = model_cfg.finetune_mode\n self._lora_enabled = self._finetune_mode == FINETUNE_MODE.LORA\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._sft_model_cfg)\n else:\n self._share_actor_critic = True\n\n else:\n if not self._lora_enabled:\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._rm_model_cfg)\n else:\n self._share_critic_reward = True\n\n def load_checkpoint(self, device: torch.device) -> None:\n \"\"\"Load checkpoints for Actor, Critic and Reward models.\"\"\"\n self._reference.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._reward.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._critic.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n else:\n if not self._lora_enabled:\n self._critic.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n\n def setup_finetuning(self, model_cfg: Optional[ModelConfig] = None) -> None:\n \"\"\"Setup finetuning for Actor, Critic and Reward models.\"\"\"\n if model_cfg is None:\n model_cfg = self.model_cfg\n lora_cfg = self.model_cfg.lora_cfg\n if not self._init_critic_with_reward:\n if self._lora_enabled and self._same_actor_critic:\n # here we can share reference model between Actor and Critic\n add_multiple_lora(self._reference, lora_cfg=lora_cfg, num=2)\n else:\n # Actor and critic cannot be shared, we 
fallback to the default behavior\n self._actor.setup_finetuning(model_cfg=model_cfg)\n self._critic.setup_finetuning(model_cfg=model_cfg)\n else:\n if self._lora_enabled:\n add_lora(self._reward, lora_cfg=lora_cfg)\n add_lora(self._reference, lora_cfg=lora_cfg)\n else:\n self._critic.setup_finetuning(model_cfg=model_cfg)\n self._actor.setup_finetuning(model_cfg=model_cfg)\n trainable_parameter_summary(self.actor, show_names=False, tag=\"Actor\")\n trainable_parameter_summary(self.critic, show_names=False, tag=\"Critic\")\n\n @property\n def share_actor_critic(self) -> bool:\n \"\"\"Whether Actor and Critic models are shared.\"\"\"\n return self._share_actor_critic\n\n @property\n def share_critic_reward(self) -> bool:\n \"\"\"Whether Critic and Reward models are shared.\"\"\"\n return self._share_critic_reward\n\n @property\n def lora_enabled(self) -> bool:\n \"\"\"Whether LoRA is enabled.\"\"\"\n return self._lora_enabled\n\n @property\n def actor(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=0)\n elif self._lora_enabled and self._init_critic_with_reward:\n enable_lora(self._reference)\n return self._reference\n else:\n return self._actor\n\n @actor.setter\n def actor(self, actor: ActorModel) -> None:\n if self._lora_enabled and (self._share_actor_critic or self._init_critic_with_reward):\n self._reference = actor\n else:\n self._actor = actor\n\n @property\n def critic(self) -> CriticModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=1)\n elif self._share_critic_reward:\n enable_lora(self._reward)\n self._reward.disable_bias_gain()\n return self._reward\n else:\n return self._critic\n\n @critic.setter\n def critic(self, critic: CriticModel) -> None:\n if self._share_actor_critic:\n self._reference = critic\n elif self._share_critic_reward:\n self._reward = critic\n else:\n self._critic = critic\n\n @property\n def reference(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic and self._lora_enabled:\n disable_lora(self._reference)\n\n return self._reference\n\n @reference.setter\n def reference(self, reference: ActorModel) -> None:\n self._reference = reference\n\n @property\n def reward(self) -> RewardModel: # noqa: D102\n if self._share_critic_reward:\n disable_lora(self._reward)\n self._reward.enable_bias_gain()\n return self._reward\n\n @reward.setter\n def reward(self, reward: RewardModel) -> None:\n self._reward = reward" }, { "identifier": "TextDataset", "path": "sheeprlhf/data/base.py", "snippet": "class TextDataset(torch.utils.data.Dataset):\n \"\"\"A simple text dataset for loading data from a pandas dataframe.\"\"\"\n\n def __init__(self, dataframe_path: str):\n self.dataframe = pd.read_pickle(dataframe_path).reset_index(drop=True)\n\n def __getitem__(self, index):\n row = self.dataframe.iloc[index].to_dict()\n return row\n\n def __len__(self):\n return len(self.dataframe)" }, { "identifier": "LeftPadCollate", "path": "sheeprlhf/data/collate.py", "snippet": "class LeftPadCollate:\n \"\"\"Data collator used for training.\n\n It is used when the data is left padded.\n \"\"\"\n\n def __init__(self, dim=1, pad_value=0, ignore_index=-1):\n self.dim = dim\n self.pad_value = pad_value\n self.ignore_index = ignore_index\n\n def __call__(self, batch): # noqa: D102\n input_ids = [list_to_tensor(item[\"chosen_input_ids\"])[: item[\"prompt_len\"]] for item in batch]\n # Use PyTorch's pad_sequence function\n # 
convert into left padding\n reversed_input_ids = [i.flip(dims=[0]) for i in input_ids]\n input_ids = pad_sequence(reversed_input_ids, batch_first=True, padding_value=self.pad_value).flip(dims=[1])\n attention_mask = input_ids.ne(self.pad_value).type(torch.int64)\n\n return {\n \"prompt_input_ids\": input_ids,\n \"prompt_attention_mask\": attention_mask,\n }" }, { "identifier": "policy_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def policy_loss(\n log_probs: torch.Tensor,\n old_log_probs: torch.Tensor,\n advantages: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the policy loss for PPO.\"\"\"\n log_ratio = (log_probs - old_log_probs) * action_mask\n ratio = torch.exp(log_ratio)\n policy_loss_1 = -advantages * ratio\n policy_loss_2 = -advantages * torch.clamp(ratio, 1 - clip_coeff, 1 + clip_coeff)\n policy_loss = torch.max(policy_loss_1, policy_loss_2)\n if action_mask is not None:\n policy_loss = torch.sum(policy_loss * action_mask) / action_mask.sum()\n else:\n policy_loss = policy_loss.mean()\n return policy_loss" }, { "identifier": "value_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def value_loss(\n values: torch.Tensor,\n old_values: torch.Tensor,\n returns: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the value loss for PPO.\"\"\"\n values_clipped = torch.clamp(values, old_values - clip_coeff, old_values + clip_coeff)\n value_loss1 = F.mse_loss(values, returns, reduction=\"none\")\n value_loss2 = F.mse_loss(values_clipped, returns, reduction=\"none\")\n value_loss = torch.max(value_loss1, value_loss2)\n if action_mask is not None:\n value_loss = torch.sum(value_loss * action_mask) / action_mask.sum()\n else:\n value_loss = value_loss.mean()\n return value_loss" }, { "identifier": "ActorModel", "path": "sheeprlhf/model/actor.py", "snippet": "class ActorModel(CasualModel):\n \"\"\"Actor model for PPO and DPO algorithms.\"\"\"\n\n def __init__(self, model_cfg: ModelConfig):\n super().__init__(model_cfg=model_cfg)\n\n def forward(self, **kwargs): # noqa: D102\n input_ids = kwargs[\"input_ids\"]\n if self.training and not self.model_cfg.use_attention_mask:\n kwargs.pop(\"attention_mask\")\n out = self.model(**kwargs)\n # Model predicts next token log probability here.\n actor_log_probs = F.log_softmax(out.logits[:, :-1, :], dim=-1)\n selected_actor_log_probs = actor_log_probs.gather(dim=-1, index=input_ids[:, 1:].unsqueeze(-1))\n return selected_actor_log_probs.squeeze(-1)" }, { "identifier": "DataConfig", "path": "sheeprlhf/structure/data.py", "snippet": "class DataConfig:\n \"\"\"The main class for processing data for the RLHF algorithm.\n\n Args:\n config_name: The name of the data configuration.\n dataset_name: The name of the dataset to load.\n root_dir: The directory where the processed data will be saved.\n tokenizer_name: The name of the tokenizer to use.\n max_length: The maximum length of the input tokens. Defaults to 512.\n max_prompt_length: The maximum length of the prompt tokens. Defaults to 512.\n num_samples: The number of samples to use. Defaults to None.\n ignore_index: The index to use for ignored tokens. Defaults to -1.\n remove_same_responses: Whether to remove samples with the same response. Defaults to True.\n remove_same_inputs: Whether to remove samples with the same input. Defaults to True.\n minimum_response_length: The minimum length of the response tokens. 
Defaults to 2.\n save_skipped_examples: Whether to save skipped examples. Defaults to False.\n validation_split: The validation split. Defaults to 0.1.\n reward_model_split: The reward model split. Defaults to 0.5.\n shuffle: Whether to shuffle the dataset. Defaults to True.\n seed: The random seed. Defaults to 42.\n split_names: The names of the splits. Defaults to (\"train\", \"val\", \"test\").\n \"\"\"\n\n _target_: str = \"sheeprlhf.data.DataProcessor\"\n config_name: str = MISSING\n dataset_name: str = MISSING\n root_dir: str = Path(\"./rlhf_data\")\n tokenizer_name: str = II(\"model.repo_name\")\n max_length: int = 256\n max_prompt_length: int = 128\n num_samples: Optional[int] = None\n ignore_index: int = -1\n remove_same_responses: bool = True\n remove_same_inputs: bool = True\n minimum_response_length: int = 5\n save_skipped_examples: bool = False\n shuffle: bool = True\n seed: int = II(\"seed\")\n validation_split: float = 0.1\n reward_model_split: float = 0.5\n split_names: Tuple[str] = (\"train\", \"test\")\n dry_run: bool = II(\"dry_run\")" }, { "identifier": "GenConfig", "path": "sheeprlhf/structure/generation.py", "snippet": "class GenConfig:\n \"\"\"The default configuration for the generator.\"\"\"\n\n # We cannot call this GenerationConfig because it will\n # conflict with transformers.GenerationConfig\n max_new_tokens: int = 128\n num_beams: int = 1\n do_sample: bool = True\n top_k: int = 50\n top_p: float = 1.0\n temperature: float = 1.0\n num_return_sequences: int = 1" }, { "identifier": "ModelConfig", "path": "sheeprlhf/structure/model.py", "snippet": "class ModelConfig:\n \"\"\"A generic configuration for models.\"\"\"\n\n config_name: str = MISSING\n repo_name: Optional[str] = None\n embedding_dim_name: Optional[str] = None\n transformer_name: Optional[str] = None\n casual: bool = True\n freeze_transformer: bool = False\n disable_dropout: bool = False\n library_cfg: HuggingFaceConfig = HuggingFaceConfig()\n finetune_mode: FINETUNE_MODE = FINETUNE_MODE.ALL\n lora_cfg: Optional[LORAConfig] = None\n use_attention_mask: bool = True\n fabric_empty_init: bool = True\n\n def __post_init__(self):\n if isinstance(self.finetune_mode, str):\n self.finetune_mode = FINETUNE_MODE(self.finetune_mode)" }, { "identifier": "PPOConfig", "path": "sheeprlhf/structure/task.py", "snippet": "class PPOConfig(TrainTaskConfig):\n \"\"\"Configuration class for PPO algorithm.\n\n Args:\n _name_: Name of the algorithm. Default is \"ppo\".\n rollout_size: Rollout size for PPO. For every training iteration this number of samples will\n be sampled from dataset and each will be used for generating response.\n rollout_mini_batch_size: Rollout mini batch size for PPO. This number is useful when the\n GPU memory is not sufficient for running all generation code with single batch.\n ppo_epochs: Number of ppo epochs to training. 
`ppo_step` function will be called `ppo_epochs` times\n normalize_rewards: Whether to whiten rewards\n normalize_advantages: Whether to whiten advantages\n adaptive_kl_coeff: Whether to use adaptively changing KL divergence coefficient\n clip_rewards: Whether to clip rewards\n reward_clip_value: Reward clipping value\n init_kl_coeff: KL divergence coefficient for comparing actor model with reference model.\n Higher value means more trust to reference model.\n target_kl_coeff: Target KL divergence coefficient\n clip_coeff: Clip coefficient for PPO loss\n vf_coeff: Value loss coefficient for PPO loss\n gae_gamma: Discount factor for GAE(Generalized Advantage Estimation)\n gae_lambd: Lambda for GAE(Generalized Advantage Estimation)\n sft_experiment_dir: Path to supervised finetuning experiment directory. Latest checkpoint will be loaded.\n rm_experiment_dir: Path to reward modelling experiment directory. Latest checkpoint will be loaded.\n sft_model_name: Name of the model to load from supervised finetuning experiment directory.\n If not provided, latest checkpoint will be loaded.\n rm_model_name: Name of the model to load from reward modelling experiment directory.\n If not provided, latest checkpoint will be loaded.\n actor_learning_rate: Learning rate for actor optimizer\n critic_learning_rate: Learning rate for critic optimizer\n init_critic_with_reward: Whether to initialize critic with reward model checkpoint or not.\n \"\"\"\n\n config_name: str = \"ppo\"\n rollout_size: int = 128\n rollout_mini_batch_size: int = 32\n ppo_epochs: int = 1\n normalize_rewards: bool = True\n normalize_advantages: bool = True\n adaptive_kl_coeff: bool = False\n clip_rewards: bool = True\n reward_clip_value: float = 5.0\n init_kl_coeff: float = 0.1\n target_kl_coeff: float = 0.1\n clip_coeff: float = 0.2\n vf_coeff: float = 0.1\n gae_gamma: float = 1.0\n gae_lambd: float = 0.95\n sft_experiment_dir: str = II(\"sft_experiment_dir\")\n rm_experiment_dir: str = II(\"rm_experiment_dir\")\n sft_model_name: Optional[str] = None\n rm_model_name: Optional[str] = None\n actor_learning_rate: float = 1e-6\n critic_learning_rate: float = 1e-6\n init_critic_with_reward: bool = True" }, { "identifier": "prepare_generation_config", "path": "sheeprlhf/utils/data.py", "snippet": "def prepare_generation_config(\n tokenizer: PreTrainedTokenizer, model_cfg: ModelConfig, gen_cfg: GenConfig, fabric: lightning.Fabric\n) -> Dict[str, Any]:\n \"\"\"Creates generation config for Hugginface models.\n\n In this function, we try to solve token problems for different models.\n \"\"\"\n gen_cfg_dict = asdict(gen_cfg)\n try:\n generation_config = GenerationConfig.from_pretrained(model_cfg.repo_name, **gen_cfg_dict)\n except EnvironmentError:\n # If the model does not have `generation_config.json` file, we create from scratch\n fabric.print(\"`generation_config.json` not found, creating `GenerationConfig` from scratch\")\n generation_config = GenerationConfig(**gen_cfg_dict)\n generation_config.pad_token_id = tokenizer.pad_token_id\n generation_config.eos_token_id = tokenizer.eos_token_id\n generation_config.bos_token_id = tokenizer.bos_token_id\n return generation_config" }, { "identifier": "validate_dataset", "path": "sheeprlhf/utils/data.py", "snippet": "def validate_dataset(fabric: lightning.Fabric, data_cfg: DataConfig) -> DataProcessor:\n \"\"\"Dataset validator.\n\n Validates the dataset for checking if it is required to re-create\n all preprocessing steps using tokenizers.\n \"\"\"\n 
os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"true\")\n data_processor: DataProcessor = instantiate_from_config(data_cfg)\n full_path = data_processor.full_path\n create_dataset: bool = True\n if os.path.isdir(full_path):\n config_path = full_path / \"config.yaml\"\n if not config_path.exists():\n fabric.print(f\"Config file not found at {config_path} for the given dataset {data_cfg.config_name}\")\n fabric.print(\"Dataset will be recreated and previous files will be deleted.\")\n else:\n open_config = OmegaConf.load(config_path)\n loaded_dataset_cfg = DataConfig(**open_config)\n current_tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n loaded_tokenizer = prepare_tokenizer(loaded_dataset_cfg.tokenizer_name)\n\n if type(current_tokenizer) != type(loaded_tokenizer):\n fabric.print(\"Tokenizer type changed.\")\n fabric.print(f\"Was {type(loaded_tokenizer)} now {type(current_tokenizer)}\")\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n elif data_cfg != loaded_dataset_cfg:\n diffs = {}\n for k, v in asdict(data_cfg).items():\n if v != getattr(loaded_dataset_cfg, k):\n diffs[k] = (v, getattr(loaded_dataset_cfg, k))\n fabric.print(\"Dataset config changed.\")\n\n fabric.print(\"\\n\".join([f\"{k} was {v[0]} now {v[1]}\" for k, v in diffs.items()]))\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n else:\n fabric.print(\"Dataset already exists. Skipping dataset creation.\")\n create_dataset = False\n if create_dataset:\n shutil.rmtree(full_path)\n # This disables FastTokenizer's parallelism for multiprocessing with dataloaders\n # TODO: check if can be avoided\n os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"false\")\n data_processor.tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n if create_dataset and fabric.is_global_zero:\n fabric.print(f\"Creating new dataset in {full_path}\")\n data_processor.process()\n OmegaConf.save(data_cfg, full_path / \"config.yaml\")\n fabric.barrier()\n\n return data_processor" }, { "identifier": "create_tensorboard_logger", "path": "sheeprlhf/utils/helper.py", "snippet": "def create_tensorboard_logger(\n fabric: Fabric, cfg: Dict[str, Any], override_log_level: bool = False\n) -> Tuple[Optional[TensorBoardLogger]]:\n \"\"\"Creates tensorboard logger.\n\n Set logger only on rank-0 but share the logger directory: since\n we don't know. 
what is happening during the `fabric.save()` method,\n at least we assure that all ranks save under the same named folder.\n As a plus, rank-0 sets the time uniquely for everyone.\n \"\"\"\n # Set logger only on rank-0 but share the logger directory: since we don't know\n # what is happening during the `fabric.save()` method, at least we assure that all\n # ranks save under the same named folder.\n # As a plus, rank-0 sets the time uniquely for everyone\n logger = None\n if fabric.is_global_zero:\n root_dir = os.path.join(\"logs\", \"runs\", cfg.root_dir)\n if override_log_level or cfg.metric.log_level > 0:\n logger = TensorBoardLogger(root_dir=root_dir, name=cfg.run_name)\n return logger" }, { "identifier": "get_log_dir", "path": "sheeprlhf/utils/helper.py", "snippet": "def get_log_dir(fabric: Fabric, root_dir: str, run_name: str, share: bool = True) -> str:\n \"\"\"Return and, if necessary, create the log directory.\n\n If there are more than one processes, the rank-0 process shares\n the directory to the others\n (if the `share` parameter is set to `True`).\n\n Args:\n fabric: the fabric instance.\n root_dir: the root directory of the experiment.\n run_name: the name of the experiment.\n share: whether or not to share the `log_dir` among processes.\n\n Returns:\n The log directory of the experiment.\n \"\"\"\n world_collective = TorchCollective()\n if fabric.world_size > 1 and share:\n world_collective.setup()\n world_collective.create_group()\n if fabric.is_global_zero:\n # If the logger was instantiated, then take the log_dir from it\n if len(fabric.loggers) > 0:\n log_dir = fabric.logger.log_dir\n else:\n # Otherwise the rank-zero process creates the log_dir\n save_dir = os.path.join(\"logs\", \"runs\", root_dir, run_name)\n fs = get_filesystem(root_dir)\n try:\n listdir_info = fs.listdir(save_dir)\n existing_versions = []\n for listing in listdir_info:\n d = listing[\"name\"]\n bn = os.path.basename(d)\n if _is_dir(fs, d) and bn.startswith(\"version_\"):\n dir_ver = bn.split(\"_\")[1].replace(\"/\", \"\")\n existing_versions.append(int(dir_ver))\n version = 0 if len(existing_versions) == 0 else max(existing_versions) + 1\n log_dir = os.path.join(save_dir, f\"version_{version}\")\n except OSError:\n warnings.warn(\"Missing logger folder: %s\", save_dir, stacklevel=2)\n log_dir = os.path.join(save_dir, f\"version_{0}\")\n\n os.makedirs(log_dir, exist_ok=True)\n if fabric.world_size > 1 and share:\n world_collective.broadcast_object_list([log_dir], src=0)\n else:\n data = [None]\n world_collective.broadcast_object_list(data, src=0)\n log_dir = data[0]\n return log_dir" }, { "identifier": "log_text", "path": "sheeprlhf/utils/helper.py", "snippet": "@rank_zero_only\ndef log_text(fabric: lightning.Fabric, text: str, name: str, step: int):\n \"\"\"Wrapper function to log text to tensorboard.\"\"\"\n if fabric.logger is not None:\n if isinstance(fabric.logger, lightning.fabric.loggers.tensorboard.TensorBoardLogger):\n fabric.logger.experiment.add_text(name, text, step)\n else:\n warnings.warn(f\"Logging text is not supported for {type(fabric.logger)}\", stacklevel=2)" }, { "identifier": "instantiate_from_config", "path": "sheeprlhf/utils/hydra.py", "snippet": "def instantiate_from_config(config: Any, *args, **kwargs):\n \"\"\"Wrapper function to instantiate objects from Hydra config.\"\"\"\n config_copy = deepcopy(config)\n if is_dataclass(config_copy):\n config_copy = asdict(config_copy)\n if isinstance(config_copy, dict) and \"config_name\" in config_copy:\n 
config_copy.pop(\"config_name\")\n return instantiate(config_copy, *args, **kwargs)" }, { "identifier": "PPOMetricManager", "path": "sheeprlhf/utils/metric.py", "snippet": "class PPOMetricManager(MetricManager): # noqa: D101\n train_actor_loss: LastValueMetric\n train_critic_loss: LastValueMetric\n train_reward_mean: LastValueMetric\n train_kl_div_mean: LastValueMetric\n info_lr: LastValueMetric\n info_ppo_time: LastValueMetric\n info_rollout_time: LastValueMetric\n info_kl_coeff: LastValueMetric\n info_actor_grad_norm: LastValueMetric\n info_critic_grad_norm: LastValueMetric\n debug_reward_scores: StatsMetric\n debug_advantages: StatsMetric\n debug_returns: StatsMetric" }, { "identifier": "compute_grad_norm", "path": "sheeprlhf/utils/model.py", "snippet": "def compute_grad_norm(model: torch.nn.Module) -> float: # noqa: D103\n total_norm = 0\n parameters = [p for p in model.parameters() if p.grad is not None and p.requires_grad]\n for p in parameters:\n param_norm = p.grad.detach().cpu().data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm**0.5\n return total_norm" }, { "identifier": "prepare_optimizer_parameters", "path": "sheeprlhf/utils/model.py", "snippet": "def prepare_optimizer_parameters(model: torch.nn.Module, weight_decay: float) -> List[Dict[str, Any]]:\n \"\"\"Taken from https://github.com/karpathy/nanoGPT.\"\"\"\n param_dict = {pn: p for pn, p in model.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n\n return optim_groups, num_decay_params, num_nodecay_params" }, { "identifier": "AdaptiveKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class AdaptiveKLController:\n \"\"\"A class for controlling the KL divergence between the old and new policy in PPO.\n\n Parameters:\n init_kl_coeff : float\n The initial value for the KL coefficient.\n target_kl_coeff : float\n The target value for the KL coefficient.\n kl_horizon : float\n The number of steps over which to adjust the KL coefficient.\n clip_range : float\n The maximum amount by which to clip the proportional error.\n\n Attributes:\n value : float\n The current value of the KL coefficient.\n \"\"\"\n\n def __init__(self, init_kl_coeff: float, target_kl_coeff: float, kl_horizon: float, clip_range: float):\n self.value = init_kl_coeff\n self.target_kl_coeff = target_kl_coeff\n self.kl_horizon = kl_horizon\n self.clip_range = clip_range\n\n def update(self, current: int, n_steps: int):\n \"\"\"Update the value of the PPO object based on the current KL divergence and the number of steps taken.\n\n Args:\n current (float): The current KL divergence.\n n_steps (int): The number of steps taken.\n \"\"\"\n target = self.target_kl_coeff\n proportional_error = torch.clamp(current / target - 1, -self.clip_range, self.clip_range)\n mult = 1 + proportional_error * n_steps / self.kl_horizon\n self.value *= mult" }, { "identifier": 
"FixedKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class FixedKLController:\n \"\"\"Dummy KL controller that does not update.\"\"\"\n\n def __init__(self, kl_coeff):\n self.value = kl_coeff\n\n def update(self, current, n_steps): # noqa: D102\n pass" }, { "identifier": "collect_rollout", "path": "sheeprlhf/utils/ppo.py", "snippet": "@torch.no_grad()\ndef collect_rollout(\n batch: Dict[str, torch.Tensor],\n agent: PPOAgent,\n kl_controller: Union[FixedKLController, AdaptiveKLController],\n generation_config: GenerationConfig,\n task_cfg: PPOConfig,\n tokenizer: PreTrainedTokenizer,\n fabric: lightning.Fabric,\n metrics: PPOMetricManager,\n) -> Dict[str, torch.Tensor]:\n \"\"\"Collects rollout data for PPO algorithm.\n\n Args:\n batch: The rollout batch data\n agent: The PPO agent.\n kl_controller: The KL controller for penalty.\n generation_config: The generation configuration.\n task_cfg: The PPO configuration used for training\n tokenizer: The model tokenizer.\n fabric: The fabric object.\n metrics: The metric manager for training.\n\n Returns:\n The rollout data.\n \"\"\"\n # We have the batch as dictionary let's create tensordict\n # so we can create dataloader with Fabric that transfers the data\n # to correct devices.\n batch_tdict = make_tensordict(batch)\n mini_batch_dataloader = DataLoader(\n batch_tdict,\n shuffle=False,\n batch_size=task_cfg.rollout_mini_batch_size,\n collate_fn=lambda x: x,\n num_workers=0,\n drop_last=False,\n )\n mini_batch_dataloader = fabric.setup_dataloaders(mini_batch_dataloader, use_distributed_sampler=False)\n rollout_dict_list = []\n\n # We use first generated token index - 1 to obtain correct logprobs.\n # Here we have batch of data fed into all models we have here is the input looks like:\n # Assuming padding tokens are `O` and input tokens are `I`\n # O O I I I\n # O O O I I (left padded batch)\n # O I I I I\n # After responses are generated we have new data assuming response tokens are `R`\n # O O I I I R R R O O O\n # O O O I I R R R R R O (padded from right side to longest text)\n # O I I I I R R R R R R\n start_token_idx = batch[\"prompt_input_ids\"].size(1) - 1\n for i, mini_batch in enumerate(mini_batch_dataloader):\n prompt_input_ids = mini_batch[\"prompt_input_ids\"]\n prompt_attention_mask = mini_batch[\"prompt_attention_mask\"]\n data = {\"input_ids\": prompt_input_ids, \"attention_mask\": prompt_attention_mask}\n\n input_ids = agent.actor.generate(**data, generation_config=generation_config)\n max_len_diff = generation_config.max_new_tokens - (input_ids.size(1) - prompt_input_ids.size(1))\n if max_len_diff > 0:\n input_ids = torch.nn.functional.pad(input_ids, (0, max_len_diff), value=tokenizer.pad_token_id)\n attention_masks = (input_ids != generation_config.pad_token_id).int()\n\n data = {\"input_ids\": input_ids, \"attention_mask\": attention_masks}\n # for logprobs we already omit the last tokens from computation\n actor_log_probs = agent.actor(**data)[:, start_token_idx:]\n ref_log_probs = agent.reference(**data)[:, start_token_idx:]\n # We need to also do the same for value and reward outputs\n values = agent.critic(**data)[:, start_token_idx:-1]\n reward_outputs = agent.reward(**data)[:, start_token_idx:-1]\n\n mini_batch_rollout = {\n \"input_ids\": input_ids, # (B, T) (B, (prompt + generated))\n \"attention_mask\": attention_masks, # (B, T) (B, (prompt + generated))\n \"actor_log_probs\": actor_log_probs, # (B, num_new_tokens)\n \"ref_log_probs\": ref_log_probs, # (B, num_new_tokens)\n \"values\": values, # (B, 
num_new_tokens)\n \"reward_outputs\": reward_outputs, # (B, num_new_tokens)\n }\n mini_batch_tdict = make_tensordict(mini_batch_rollout).cpu()\n rollout_dict_list.append(mini_batch_tdict)\n if i == 0:\n sample_from_rollout = tokenizer.decode(input_ids[0], skip_special_tokens=True)\n\n rollout = torch.cat(rollout_dict_list, 0)\n action_mask = rollout[\"attention_mask\"][:, start_token_idx:-1].int()\n reward_outputs = rollout.pop(\"reward_outputs\")\n # we already removed the last token from action mask\n # we dont need to remove it from last_token_idx\n last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True)\n reward_scores = torch.gather(reward_outputs, dim=-1, index=last_token_idx).squeeze(-1)\n kl_div = rollout[\"actor_log_probs\"] - rollout[\"ref_log_probs\"]\n\n mean_kl_div = masked_mean(kl_div, action_mask).mean()\n if task_cfg.clip_rewards:\n torch.clip_(reward_scores, -task_cfg.reward_clip_value, task_cfg.reward_clip_value)\n\n if task_cfg.normalize_rewards:\n # we normalize the reward but do not shift the mean\n # TODO: Does it really important to normalize the rewards?\n reward_scores = normalize(reward_scores, shift_mean=False)\n\n # Rewards are made of two components:\n # 1. Per token kl divergence\n # 2. Last token reward\n # Combination of these two component creates the reward signal\n rewards = kl_div.detach().clone() * -kl_controller.value\n rewards.scatter_add_(dim=1, index=last_token_idx, src=reward_scores.unsqueeze(-1))\n values = rollout[\"values\"]\n\n advantages, returns = compute_advantages_and_returns(\n rewards=rewards * action_mask,\n values=values * action_mask,\n gamma=task_cfg.gae_gamma,\n lambd=task_cfg.gae_lambd,\n )\n rollout[\"advantages\"] = advantages\n rollout[\"returns\"] = returns\n kl_controller.update(mean_kl_div, rollout[\"input_ids\"].size(0))\n metrics.train_kl_div_mean.update(mean_kl_div.item())\n metrics.train_reward_mean.update(reward_scores.mean().item())\n metrics.debug_reward_scores(reward_scores)\n metrics.debug_advantages(advantages)\n metrics.debug_returns(returns)\n\n return rollout, sample_from_rollout" }, { "identifier": "masked_normalize", "path": "sheeprlhf/utils/ppo.py", "snippet": "def masked_normalize( # noqa: D103\n tensor: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True, dim: int = 1, eps: float = 1e-8\n) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n normalized = mean_centered * var.clamp(min=eps).rsqrt()\n if not shift_mean:\n normalized += mean\n return normalized" }, { "identifier": "register_task", "path": "sheeprlhf/utils/registry.py", "snippet": "def register_task():\n \"\"\"Task registery decorator.\"\"\"\n\n def inner_decorator(fn):\n return _register_task(fn)\n\n return inner_decorator" } ]
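The context list above ends with `AdaptiveKLController`, whose `update` method rescales the KL penalty coefficient by a clamped proportional error after each rollout. A tiny numeric rehearsal of that update rule, using the `init_kl_coeff`/`target_kl_coeff` defaults visible in the record's `PPOConfig` snippet (0.1 each); the clip range and horizon below are arbitrary illustration values, not project defaults.

```python
import torch

# value *= 1 + clamp(current/target - 1, -clip_range, clip_range) * n_steps / kl_horizon
value, target_kl, clip_range, kl_horizon = 0.1, 0.1, 0.2, 1000  # clip/horizon arbitrary here


def update(value: float, current_kl: float, n_steps: int) -> float:
    err = torch.clamp(torch.tensor(current_kl / target_kl - 1.0), -clip_range, clip_range)
    return value * (1.0 + err.item() * n_steps / kl_horizon)


value = update(value, current_kl=0.25, n_steps=32)  # measured KL above target -> coefficient grows
print(round(value, 6))                              # 0.1 * (1 + 0.2 * 32 / 1000) = 0.10064
```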
import copy import time import torch from pathlib import Path from typing import Dict from lightning import Fabric from torch.utils.data import DataLoader from tqdm import tqdm from transformers import GenerationConfig, PreTrainedTokenizer from sheeprlhf.agent.ppo import PPOAgent from sheeprlhf.data.base import TextDataset from sheeprlhf.data.collate import LeftPadCollate from sheeprlhf.loss.ppo import policy_loss, value_loss from sheeprlhf.model.actor import ActorModel from sheeprlhf.structure.data import DataConfig from sheeprlhf.structure.generation import GenConfig from sheeprlhf.structure.model import ModelConfig from sheeprlhf.structure.task import PPOConfig from sheeprlhf.utils.data import prepare_generation_config, validate_dataset from sheeprlhf.utils.helper import create_tensorboard_logger, get_log_dir, log_text from sheeprlhf.utils.hydra import instantiate_from_config from sheeprlhf.utils.metric import PPOMetricManager from sheeprlhf.utils.model import compute_grad_norm, prepare_optimizer_parameters from sheeprlhf.utils.ppo import AdaptiveKLController, FixedKLController, collect_rollout, masked_normalize from sheeprlhf.utils.registry import register_task
11,159
tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens) p_loss = policy_loss( 
log_probs=log_probs, old_log_probs=old_log_probs, advantages=advantages, clip_coeff=task_cfg.clip_coeff, action_mask=action_mask, ) fabric.backward(p_loss / task_cfg.gradient_accumulation_steps) with fabric.no_backward_sync(agent.critic, enabled=is_accumulating): values = agent.critic(**generated_data)[:, start_token_idx:-1] # (B, num_new_tokens)
@torch.no_grad() def generate( # noqa: D103 agent: PPOAgent, tokenizer: PreTrainedTokenizer, generation_config: GenerationConfig, example_prompt: Dict[str, torch.Tensor], device: torch.device, ): generated_input_ids = agent.actor.module.generate( input_ids=example_prompt["input_ids"].to(device), attention_mask=example_prompt["attention_mask"].to(device), generation_config=generation_config, use_cache=True, ) prompt_length = example_prompt["input_ids"].shape[1] generated_attention_mask = (generated_input_ids != generation_config.pad_token_id).int() generated_data = {"input_ids": generated_input_ids, "attention_mask": generated_attention_mask} reward = agent.reward(**generated_data)[:, prompt_length:] action_mask = (generated_input_ids != generation_config.pad_token_id).int()[:, prompt_length:] last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True) reward_score = torch.gather(reward, dim=-1, index=last_token_idx).squeeze(-1) return tokenizer.decode(generated_input_ids[0], skip_special_tokens=True), reward_score.item() @register_task() def main(fabric: Fabric, cfg: Dict): # noqa: D103 task_cfg = PPOConfig(**cfg.task) model_cfg = ModelConfig(**cfg.model) data_cfg = DataConfig(**cfg.data) gen_cfg = GenConfig(**cfg.generation) optim_cfg = cfg.optim fabric.seed_everything(cfg.seed + fabric.global_rank) # Create TensorBoardLogger. This will create the logger only on the # rank-0 process logger = create_tensorboard_logger(fabric, cfg, override_log_level=True) if logger and fabric.is_global_zero: fabric._loggers = [logger] fabric.logger.log_hyperparams(cfg) log_dir = get_log_dir(fabric, cfg.root_dir, cfg.run_name) experiment_dir = Path(log_dir).parent # Setup Metrics metrics = PPOMetricManager(log_interval=task_cfg.log_interval).to(fabric.device) # Setup Dataloaders data_processor = validate_dataset(fabric, data_cfg) dataset_path = Path(data_processor.full_path) tokenizer = data_processor.tokenizer collator = LeftPadCollate(pad_value=tokenizer.pad_token_id, ignore_index=data_cfg.ignore_index) train_dataset = TextDataset(dataframe_path=dataset_path / "finetune_train.pkl") train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=task_cfg.micro_batch_size, collate_fn=collator, num_workers=task_cfg.num_workers, ) train_dataloader = fabric.setup_dataloaders(train_dataloader) example_prompt = torch.load(dataset_path / "example_prompt.pt") # Setup Model with fabric.init_module(empty_init=model_cfg.fabric_empty_init): agent = PPOAgent(model_cfg=model_cfg, task_cfg=task_cfg) agent.load_checkpoint(device=fabric.device) agent.setup_finetuning() agent.actor = fabric.setup_module(agent.actor) agent.critic = fabric.setup_module(agent.critic) if not agent.share_critic_reward: agent.reward = fabric.setup_module(agent.reward) if not agent.share_actor_critic and not agent.lora_enabled: agent.reference = fabric.setup_module(agent.reference) # Setup Generation Configs generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = 
fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens) p_loss = policy_loss( log_probs=log_probs, old_log_probs=old_log_probs, advantages=advantages, clip_coeff=task_cfg.clip_coeff, action_mask=action_mask, ) fabric.backward(p_loss / task_cfg.gradient_accumulation_steps) with fabric.no_backward_sync(agent.critic, enabled=is_accumulating): values = agent.critic(**generated_data)[:, start_token_idx:-1] # (B, num_new_tokens)
v_loss = value_loss(
4
2023-10-31 12:02:02+00:00
16k
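The `policy_loss` and `value_loss` snippets in this record implement the standard clipped PPO objectives with a token-level action mask, and the record's `next_line` picks up at the `value_loss(` call. Below is a short, self-contained rehearsal of both computations on dummy tensors that mirrors the masking and clipping shown in those snippets; the shapes, seed, and mask are purely illustrative.

```python
import torch

torch.manual_seed(0)
B, T = 2, 5                                   # (batch, num_new_tokens)
log_probs     = torch.randn(B, T)
old_log_probs = torch.randn(B, T)
advantages    = torch.randn(B, T)
values        = torch.randn(B, T)
old_values    = torch.randn(B, T)
returns       = torch.randn(B, T)
action_mask   = torch.tensor([[1, 1, 1, 0, 0],
                              [1, 1, 1, 1, 0]], dtype=torch.float32)
clip_coeff    = 0.2                           # PPOConfig default in this record

# Clipped policy surrogate, masked-mean over generated tokens (as in `policy_loss`)
ratio  = torch.exp((log_probs - old_log_probs) * action_mask)
p_l1   = -advantages * ratio
p_l2   = -advantages * torch.clamp(ratio, 1 - clip_coeff, 1 + clip_coeff)
p_loss = torch.sum(torch.max(p_l1, p_l2) * action_mask) / action_mask.sum()

# Clipped value loss; squared error is equivalent to F.mse_loss(..., reduction="none")
values_clipped = torch.clamp(values, old_values - clip_coeff, old_values + clip_coeff)
v_elem = torch.max((values - returns) ** 2, (values_clipped - returns) ** 2)
v_loss = torch.sum(v_elem * action_mask) / action_mask.sum()

print(p_loss.item(), v_loss.item())
```

In the cropped code above, the policy loss is backpropagated as `p_loss / gradient_accumulation_steps` inside `fabric.no_backward_sync`, and the critic's value-loss computation is exactly where the record's `next_line` continues.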
cpacker/MemGPT
tests/test_storage.py
[ { "identifier": "StorageConnector", "path": "memgpt/agent_store/storage.py", "snippet": "class StorageConnector:\n \"\"\"Defines a DB connection that is user-specific to access data: Documents, Passages, Archival/Recall Memory\"\"\"\n\n def __init__(self, table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n self.user_id = user_id\n self.agent_id = agent_id\n self.table_type = table_type\n\n # get object type\n if table_type == TableType.ARCHIVAL_MEMORY:\n self.type = Passage\n self.table_name = ARCHIVAL_TABLE_NAME\n elif table_type == TableType.RECALL_MEMORY:\n self.type = Message\n self.table_name = RECALL_TABLE_NAME\n elif table_type == TableType.DOCUMENTS:\n self.type = Document\n self.table_name == DOCUMENT_TABLE_NAME\n elif table_type == TableType.PASSAGES:\n self.type = Passage\n self.table_name = PASSAGE_TABLE_NAME\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n printd(f\"Using table name {self.table_name}\")\n\n # setup base filters for agent-specific tables\n if self.table_type == TableType.ARCHIVAL_MEMORY or self.table_type == TableType.RECALL_MEMORY:\n # agent-specific table\n assert agent_id is not None, \"Agent ID must be provided for agent-specific tables\"\n self.filters = {\"user_id\": self.user_id, \"agent_id\": self.agent_id}\n elif self.table_type == TableType.PASSAGES or self.table_type == TableType.DOCUMENTS:\n # setup base filters for user-specific tables\n assert agent_id is None, \"Agent ID must not be provided for user-specific tables\"\n self.filters = {\"user_id\": self.user_id}\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n def get_filters(self, filters: Optional[Dict] = {}):\n # get all filters for query\n if filters is not None:\n filter_conditions = {**self.filters, **filters}\n else:\n filter_conditions = self.filters\n return filter_conditions\n\n @staticmethod\n def get_storage_connector(table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:\n storage_type = config.archival_storage_type\n elif table_type == TableType.RECALL_MEMORY:\n storage_type = config.recall_storage_type\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n if storage_type == \"postgres\":\n from memgpt.agent_store.db import PostgresStorageConnector\n\n return PostgresStorageConnector(table_type, config, user_id, agent_id)\n elif storage_type == \"chroma\":\n from memgpt.agent_store.chroma import ChromaStorageConnector\n\n return ChromaStorageConnector(table_type, config, user_id, agent_id)\n\n # TODO: add back\n # elif storage_type == \"lancedb\":\n # from memgpt.agent_store.db import LanceDBConnector\n\n # return LanceDBConnector(agent_config=agent_config, table_type=table_type)\n\n elif storage_type == \"sqlite\":\n from memgpt.agent_store.db import SQLLiteStorageConnector\n\n return SQLLiteStorageConnector(table_type, config, user_id, agent_id)\n\n else:\n raise NotImplementedError(f\"Storage type {storage_type} not implemented\")\n\n @staticmethod\n def get_archival_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id, agent_id)\n\n @staticmethod\n def get_recall_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id, agent_id)\n\n @abstractmethod\n def get_filters(self, filters: 
Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:\n pass\n\n @abstractmethod\n def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:\n pass\n\n @abstractmethod\n def get(self, id: str) -> Optional[Record]:\n pass\n\n @abstractmethod\n def size(self, filters: Optional[Dict] = {}) -> int:\n pass\n\n @abstractmethod\n def insert(self, record: Record):\n pass\n\n @abstractmethod\n def insert_many(self, records: List[Record], show_progress=False):\n pass\n\n @abstractmethod\n def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:\n pass\n\n @abstractmethod\n def query_date(self, start_date, end_date):\n pass\n\n @abstractmethod\n def query_text(self, query):\n pass\n\n @abstractmethod\n def delete_table(self):\n pass\n\n @abstractmethod\n def delete(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def save(self):\n pass" }, { "identifier": "TableType", "path": "memgpt/agent_store/storage.py", "snippet": "class TableType:\n ARCHIVAL_MEMORY = \"archival_memory\" # recall memory table: memgpt_agent_{agent_id}\n RECALL_MEMORY = \"recall_memory\" # archival memory table: memgpt_agent_recall_{agent_id}\n PASSAGES = \"passages\" # TODO\n DOCUMENTS = \"documents\" # TODO" }, { "identifier": "embedding_model", "path": "memgpt/embeddings.py", "snippet": "def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None):\n \"\"\"Return LlamaIndex embedding model to use for embeddings\"\"\"\n\n endpoint_type = config.embedding_endpoint_type\n\n # TODO refactor to pass credentials through args\n credentials = MemGPTCredentials.load()\n\n if endpoint_type == \"openai\":\n additional_kwargs = {\"user_id\": user_id} if user_id else {}\n model = OpenAIEmbedding(api_base=config.embedding_endpoint, api_key=credentials.openai_key, additional_kwargs=additional_kwargs)\n return model\n elif endpoint_type == \"azure\":\n # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings\n model = \"text-embedding-ada-002\"\n deployment = credentials.azure_embedding_deployment if credentials.azure_embedding_deployment is not None else model\n return AzureOpenAIEmbedding(\n model=model,\n deployment_name=deployment,\n api_key=credentials.azure_key,\n azure_endpoint=credentials.azure_endpoint,\n api_version=credentials.azure_version,\n )\n elif endpoint_type == \"hugging-face\":\n try:\n return EmbeddingEndpoint(model=config.embedding_model, base_url=config.embedding_endpoint, user=user_id)\n except Exception as e:\n # TODO: remove, this is just to get passing tests\n print(e)\n return default_embedding_model()\n else:\n return default_embedding_model()" }, { "identifier": "Message", "path": "memgpt/data_types.py", "snippet": "class Message(Record):\n \"\"\"Representation of a message sent.\n\n Messages can be:\n - agent->user (role=='agent')\n - user->agent and system->agent (role=='user')\n - or function/tool call returns (role=='function'/'tool').\n \"\"\"\n\n def __init__(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n role: str,\n text: str,\n model: Optional[str] = None, # model used to make function call\n name: Optional[str] = None, # optional participant name\n created_at: Optional[str] = None,\n tool_calls: Optional[List[ToolCall]] = None, # list of tool calls requested\n tool_call_id: Optional[str] = None,\n embedding: Optional[np.ndarray] = None,\n id: 
Optional[uuid.UUID] = None,\n ):\n super().__init__(id)\n self.user_id = user_id\n self.agent_id = agent_id\n self.text = text\n self.model = model # model name (e.g. gpt-4)\n self.created_at = datetime.now().astimezone() if created_at is None else created_at\n\n # openai info\n assert role in [\"system\", \"assistant\", \"user\", \"tool\"]\n self.role = role # role (agent/user/function)\n self.name = name\n\n # tool (i.e. function) call info (optional)\n\n # if role == \"assistant\", this MAY be specified\n # if role != \"assistant\", this must be null\n assert tool_calls is None or isinstance(tool_calls, list)\n self.tool_calls = tool_calls\n\n # if role == \"tool\", then this must be specified\n # if role != \"tool\", this must be null\n if role == \"tool\":\n assert tool_call_id is not None\n else:\n assert tool_call_id is None\n self.tool_call_id = tool_call_id\n\n # embedding (optional)\n self.embedding = embedding\n\n # def __repr__(self):\n # pass\n\n @staticmethod\n def dict_to_message(\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n openai_message_dict: dict,\n model: Optional[str] = None, # model used to make function call\n allow_functions_style: bool = False, # allow deprecated functions style?\n ):\n \"\"\"Convert a ChatCompletion message object into a Message object (synced to DB)\"\"\"\n\n # If we're going from deprecated function form\n if openai_message_dict[\"role\"] == \"function\":\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert from 'function' response to a 'tool' response\n # NOTE: this does not conventionally include a tool_call_id, it's on the caster to provide it\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=\"tool\", # NOTE\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=openai_message_dict[\"tool_calls\"] if \"tool_calls\" in openai_message_dict else None,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n elif \"function_call\" in openai_message_dict and openai_message_dict[\"function_call\"] is not None:\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert openai_message_dict[\"role\"] == \"assistant\", openai_message_dict\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert a function_call (from an assistant message) into a tool_call\n # NOTE: this does not conventionally include a tool_call_id (ToolCall.id), it's on the caster to provide it\n tool_calls = [\n ToolCall(\n id=openai_message_dict[\"tool_call_id\"], # NOTE: unconventional source, not to spec\n tool_call_type=\"function\",\n function={\n \"name\": openai_message_dict[\"function_call\"][\"name\"],\n \"arguments\": openai_message_dict[\"function_call\"][\"arguments\"],\n },\n )\n ]\n\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=None, # NOTE: None, since this field is only non-null for role=='tool'\n )\n\n else:\n # Basic sanity check\n if openai_message_dict[\"role\"] 
== \"tool\":\n assert \"tool_call_id\" in openai_message_dict and openai_message_dict[\"tool_call_id\"] is not None, openai_message_dict\n else:\n if \"tool_call_id\" in openai_message_dict:\n assert openai_message_dict[\"tool_call_id\"] is None, openai_message_dict\n\n if \"tool_calls\" in openai_message_dict and openai_message_dict[\"tool_calls\"] is not None:\n assert openai_message_dict[\"role\"] == \"assistant\", openai_message_dict\n\n tool_calls = [\n ToolCall(id=tool_call[\"id\"], tool_call_type=tool_call[\"type\"], function=tool_call[\"function\"])\n for tool_call in openai_message_dict[\"tool_calls\"]\n ]\n else:\n tool_calls = None\n\n # If we're going from tool-call style\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n def to_openai_dict(self):\n \"\"\"Go from Message class to ChatCompletion message object\"\"\"\n\n # TODO change to pydantic casting, eg `return SystemMessageModel(self)`\n\n if self.role == \"system\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"user\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"assistant\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional fields, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n if self.tool_calls is not None:\n openai_message[\"tool_calls\"] = [tool_call.to_dict() for tool_call in self.tool_calls]\n\n elif self.role == \"tool\":\n assert all([v is not None for v in [self.text, self.role, self.tool_call_id]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n \"tool_call_id\": self.tool_call_id,\n }\n\n else:\n raise ValueError(self.role)\n\n return openai_message" }, { "identifier": "Passage", "path": "memgpt/data_types.py", "snippet": "class Passage(Record):\n \"\"\"A passage is a single unit of memory, and a standard format accross all storage backends.\n\n It is a string of text with an assoidciated embedding.\n \"\"\"\n\n def __init__(\n self,\n user_id: uuid.UUID,\n text: str,\n agent_id: Optional[uuid.UUID] = None, # set if contained in agent memory\n embedding: Optional[np.ndarray] = None,\n data_source: Optional[str] = None, # None if created by agent\n doc_id: Optional[uuid.UUID] = None,\n id: Optional[uuid.UUID] = None,\n metadata: Optional[dict] = {},\n ):\n super().__init__(id)\n self.user_id = user_id\n self.agent_id = agent_id\n self.text = text\n self.data_source = data_source\n self.embedding = embedding\n self.doc_id = doc_id\n self.metadata = metadata\n\n assert isinstance(self.user_id, uuid.UUID), f\"UUID {self.user_id} must be a UUID type\"\n assert not agent_id or 
isinstance(self.agent_id, uuid.UUID), f\"UUID {self.agent_id} must be a UUID type\"\n assert not doc_id or isinstance(self.doc_id, uuid.UUID), f\"UUID {self.doc_id} must be a UUID type\"" }, { "identifier": "EmbeddingConfig", "path": "memgpt/data_types.py", "snippet": "class EmbeddingConfig:\n def __init__(\n self,\n embedding_endpoint_type: Optional[str] = \"openai\",\n embedding_endpoint: Optional[str] = \"https://api.openai.com/v1\",\n embedding_model: Optional[str] = \"text-embedding-ada-002\",\n embedding_dim: Optional[int] = 1536,\n embedding_chunk_size: Optional[int] = 300,\n ):\n self.embedding_endpoint_type = embedding_endpoint_type\n self.embedding_endpoint = embedding_endpoint\n self.embedding_model = embedding_model\n self.embedding_dim = embedding_dim\n self.embedding_chunk_size = embedding_chunk_size" }, { "identifier": "AgentState", "path": "memgpt/data_types.py", "snippet": "class AgentState:\n def __init__(\n self,\n name: str,\n user_id: uuid.UUID,\n persona: str, # the filename where the persona was originally sourced from\n human: str, # the filename where the human was originally sourced from\n llm_config: LLMConfig,\n embedding_config: EmbeddingConfig,\n preset: str,\n # (in-context) state contains:\n # persona: str # the current persona text\n # human: str # the current human text\n # system: str, # system prompt (not required if initializing with a preset)\n # functions: dict, # schema definitions ONLY (function code linked at runtime)\n # messages: List[dict], # in-context messages\n id: Optional[uuid.UUID] = None,\n state: Optional[dict] = None,\n created_at: Optional[str] = None,\n ):\n if id is None:\n self.id = uuid.uuid4()\n else:\n self.id = id\n assert isinstance(self.id, uuid.UUID), f\"UUID {self.id} must be a UUID type\"\n assert isinstance(user_id, uuid.UUID), f\"UUID {user_id} must be a UUID type\"\n\n # TODO(swooders) we need to handle the case where name is None here\n # in AgentConfig we autogenerate a name, not sure what the correct thing w/ DBs is, what about NounAdjective combos? Like giphy does? 
BoredGiraffe etc\n self.name = name\n self.user_id = user_id\n self.preset = preset\n self.persona = persona\n self.human = human\n\n self.llm_config = llm_config\n self.embedding_config = embedding_config\n\n self.created_at = created_at if created_at is not None else datetime.now()\n\n # state\n self.state = {} if not state else state" }, { "identifier": "OpenAIEmbeddingConfig", "path": "memgpt/data_types.py", "snippet": "class OpenAIEmbeddingConfig(EmbeddingConfig):\n def __init__(self, openai_key: Optional[str] = None, **kwargs):\n super().__init__(**kwargs)\n self.openai_key = openai_key" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n 
\"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, \"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", 
self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": "MemGPTCredentials", "path": "memgpt/credentials.py", "snippet": "class MemGPTCredentials:\n # credentials for MemGPT\n credentials_path: str = os.path.join(MEMGPT_DIR, \"credentials\")\n\n # openai config\n openai_auth_type: str = \"bearer_token\"\n openai_key: str = None\n\n # azure config\n azure_auth_type: str = \"api_key\"\n azure_key: str = None\n azure_endpoint: str = None\n azure_version: str = None\n azure_deployment: str = None\n azure_embedding_deployment: str = None\n\n # custom llm API config\n openllm_auth_type: str = None\n openllm_key: str = None\n\n @classmethod\n def load(cls) -> 
\"MemGPTCredentials\":\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CREDENTIALS_PATH\"):\n credentials_path = os.getenv(\"MEMGPT_CREDENTIALS_PATH\")\n else:\n credentials_path = MemGPTCredentials.credentials_path\n\n if os.path.exists(credentials_path):\n # read existing credentials\n config.read(credentials_path)\n config_dict = {\n # openai\n \"openai_auth_type\": get_field(config, \"openai\", \"auth_type\"),\n \"openai_key\": get_field(config, \"openai\", \"key\"),\n # azure\n \"azure_auth_type\": get_field(config, \"azure\", \"auth_type\"),\n \"azure_key\": get_field(config, \"azure\", \"key\"),\n \"azure_endpoint\": get_field(config, \"azure\", \"endpoint\"),\n \"azure_version\": get_field(config, \"azure\", \"version\"),\n \"azure_deployment\": get_field(config, \"azure\", \"deployment\"),\n \"azure_embedding_deployment\": get_field(config, \"azure\", \"embedding_deployment\"),\n # open llm\n \"openllm_auth_type\": get_field(config, \"openllm\", \"auth_type\"),\n \"openllm_key\": get_field(config, \"openllm\", \"key\"),\n # path\n \"credentials_path\": credentials_path,\n }\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n return cls(**config_dict)\n\n # create new config\n config = cls(credentials_path=credentials_path)\n config.save() # save updated config\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n # openai config\n set_field(config, \"openai\", \"auth_type\", self.openai_auth_type)\n set_field(config, \"openai\", \"key\", self.openai_key)\n\n # azure config\n set_field(config, \"azure\", \"auth_type\", self.azure_auth_type)\n set_field(config, \"azure\", \"key\", self.azure_key)\n set_field(config, \"azure\", \"endpoint\", self.azure_endpoint)\n set_field(config, \"azure\", \"version\", self.azure_version)\n set_field(config, \"azure\", \"deployment\", self.azure_deployment)\n set_field(config, \"azure\", \"embedding_deployment\", self.azure_embedding_deployment)\n\n # openai config\n set_field(config, \"openllm\", \"auth_type\", self.openllm_auth_type)\n set_field(config, \"openllm\", \"key\", self.openllm_key)\n\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n with open(self.credentials_path, \"w\") as f:\n config.write(f)\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CREDENTIALS_PATH\"):\n credentials_path = os.getenv(\"MEMGPT_CREDENTIALS_PATH\")\n else:\n credentials_path = MemGPTCredentials.credentials_path\n\n assert not os.path.isdir(credentials_path), f\"Credentials path {credentials_path} cannot be set to a directory.\"\n return os.path.exists(credentials_path)" }, { "identifier": "StorageConnector", "path": "memgpt/agent_store/storage.py", "snippet": "class StorageConnector:\n \"\"\"Defines a DB connection that is user-specific to access data: Documents, Passages, Archival/Recall Memory\"\"\"\n\n def __init__(self, table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n self.user_id = user_id\n self.agent_id = agent_id\n self.table_type = table_type\n\n # get object type\n if table_type == TableType.ARCHIVAL_MEMORY:\n self.type = Passage\n self.table_name = ARCHIVAL_TABLE_NAME\n elif table_type == TableType.RECALL_MEMORY:\n self.type = Message\n self.table_name = RECALL_TABLE_NAME\n elif table_type == TableType.DOCUMENTS:\n self.type = Document\n self.table_name == DOCUMENT_TABLE_NAME\n elif table_type == TableType.PASSAGES:\n self.type = 
Passage\n self.table_name = PASSAGE_TABLE_NAME\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n printd(f\"Using table name {self.table_name}\")\n\n # setup base filters for agent-specific tables\n if self.table_type == TableType.ARCHIVAL_MEMORY or self.table_type == TableType.RECALL_MEMORY:\n # agent-specific table\n assert agent_id is not None, \"Agent ID must be provided for agent-specific tables\"\n self.filters = {\"user_id\": self.user_id, \"agent_id\": self.agent_id}\n elif self.table_type == TableType.PASSAGES or self.table_type == TableType.DOCUMENTS:\n # setup base filters for user-specific tables\n assert agent_id is None, \"Agent ID must not be provided for user-specific tables\"\n self.filters = {\"user_id\": self.user_id}\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n def get_filters(self, filters: Optional[Dict] = {}):\n # get all filters for query\n if filters is not None:\n filter_conditions = {**self.filters, **filters}\n else:\n filter_conditions = self.filters\n return filter_conditions\n\n @staticmethod\n def get_storage_connector(table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:\n storage_type = config.archival_storage_type\n elif table_type == TableType.RECALL_MEMORY:\n storage_type = config.recall_storage_type\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n if storage_type == \"postgres\":\n from memgpt.agent_store.db import PostgresStorageConnector\n\n return PostgresStorageConnector(table_type, config, user_id, agent_id)\n elif storage_type == \"chroma\":\n from memgpt.agent_store.chroma import ChromaStorageConnector\n\n return ChromaStorageConnector(table_type, config, user_id, agent_id)\n\n # TODO: add back\n # elif storage_type == \"lancedb\":\n # from memgpt.agent_store.db import LanceDBConnector\n\n # return LanceDBConnector(agent_config=agent_config, table_type=table_type)\n\n elif storage_type == \"sqlite\":\n from memgpt.agent_store.db import SQLLiteStorageConnector\n\n return SQLLiteStorageConnector(table_type, config, user_id, agent_id)\n\n else:\n raise NotImplementedError(f\"Storage type {storage_type} not implemented\")\n\n @staticmethod\n def get_archival_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id, agent_id)\n\n @staticmethod\n def get_recall_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id, agent_id)\n\n @abstractmethod\n def get_filters(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:\n pass\n\n @abstractmethod\n def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:\n pass\n\n @abstractmethod\n def get(self, id: str) -> Optional[Record]:\n pass\n\n @abstractmethod\n def size(self, filters: Optional[Dict] = {}) -> int:\n pass\n\n @abstractmethod\n def insert(self, record: Record):\n pass\n\n @abstractmethod\n def insert_many(self, records: List[Record], show_progress=False):\n pass\n\n @abstractmethod\n def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:\n pass\n\n @abstractmethod\n def query_date(self, start_date, end_date):\n 
pass\n\n @abstractmethod\n def query_text(self, query):\n pass\n\n @abstractmethod\n def delete_table(self):\n pass\n\n @abstractmethod\n def delete(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def save(self):\n pass" }, { "identifier": "TableType", "path": "memgpt/agent_store/storage.py", "snippet": "class TableType:\n ARCHIVAL_MEMORY = \"archival_memory\" # recall memory table: memgpt_agent_{agent_id}\n RECALL_MEMORY = \"recall_memory\" # archival memory table: memgpt_agent_recall_{agent_id}\n PASSAGES = \"passages\" # TODO\n DOCUMENTS = \"documents\" # TODO" }, { "identifier": "MetadataStore", "path": "memgpt/metadata.py", "snippet": "class MetadataStore:\n def __init__(self, config: MemGPTConfig):\n # TODO: get DB URI or path\n if config.metadata_storage_type == \"postgres\":\n self.uri = config.metadata_storage_uri\n elif config.metadata_storage_type == \"sqlite\":\n path = os.path.join(config.metadata_storage_path, \"sqlite.db\")\n self.uri = f\"sqlite:///{path}\"\n else:\n raise ValueError(f\"Invalid metadata storage type: {config.metadata_storage_type}\")\n\n # TODO: check to see if table(s) need to be greated or not\n\n self.engine = create_engine(self.uri)\n Base.metadata.create_all(\n self.engine, tables=[UserModel.__table__, AgentModel.__table__, SourceModel.__table__, AgentSourceMappingModel.__table__]\n )\n session_maker = sessionmaker(bind=self.engine)\n self.session = session_maker()\n\n @enforce_types\n def create_agent(self, agent: AgentState):\n # insert into agent table\n # make sure agent.name does not already exist for user user_id\n if self.session.query(AgentModel).filter(AgentModel.name == agent.name).filter(AgentModel.user_id == agent.user_id).count() > 0:\n raise ValueError(f\"Agent with name {agent.name} already exists\")\n self.session.add(AgentModel(**vars(agent)))\n self.session.commit()\n\n @enforce_types\n def create_source(self, source: Source):\n # make sure source.name does not already exist for user\n if (\n self.session.query(SourceModel).filter(SourceModel.name == source.name).filter(SourceModel.user_id == source.user_id).count()\n > 0\n ):\n raise ValueError(f\"Source with name {source.name} already exists\")\n self.session.add(SourceModel(**vars(source)))\n self.session.commit()\n\n @enforce_types\n def create_user(self, user: User):\n if self.session.query(UserModel).filter(UserModel.id == user.id).count() > 0:\n raise ValueError(f\"User with id {user.id} already exists\")\n self.session.add(UserModel(**vars(user)))\n self.session.commit()\n\n @enforce_types\n def update_agent(self, agent: AgentState):\n self.session.query(AgentModel).filter(AgentModel.id == agent.id).update(vars(agent))\n self.session.commit()\n\n @enforce_types\n def update_user(self, user: User):\n self.session.query(UserModel).filter(UserModel.id == user.id).update(vars(user))\n self.session.commit()\n\n @enforce_types\n def update_source(self, source: Source):\n self.session.query(SourceModel).filter(SourceModel.id == source.id).update(vars(source))\n self.session.commit()\n\n @enforce_types\n def delete_agent(self, agent_id: uuid.UUID):\n self.session.query(AgentModel).filter(AgentModel.id == agent_id).delete()\n self.session.commit()\n\n @enforce_types\n def delete_source(self, source_id: uuid.UUID):\n # delete from sources table\n self.session.query(SourceModel).filter(SourceModel.id == source_id).delete()\n\n # delete any mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).delete()\n\n 
self.session.commit()\n\n @enforce_types\n def delete_user(self, user_id: uuid.UUID):\n # delete from users table\n self.session.query(UserModel).filter(UserModel.id == user_id).delete()\n\n # delete associated agents\n self.session.query(AgentModel).filter(AgentModel.user_id == user_id).delete()\n\n # delete associated sources\n self.session.query(SourceModel).filter(SourceModel.user_id == user_id).delete()\n\n # delete associated mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.user_id == user_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def list_agents(self, user_id: uuid.UUID) -> List[AgentState]:\n results = self.session.query(AgentModel).filter(AgentModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def list_sources(self, user_id: uuid.UUID) -> List[Source]:\n results = self.session.query(SourceModel).filter(SourceModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def get_agent(\n self, agent_id: Optional[uuid.UUID] = None, agent_name: Optional[str] = None, user_id: Optional[uuid.UUID] = None\n ) -> Optional[AgentState]:\n if agent_id:\n results = self.session.query(AgentModel).filter(AgentModel.id == agent_id).all()\n else:\n assert agent_name is not None and user_id is not None, \"Must provide either agent_id or agent_name\"\n results = self.session.query(AgentModel).filter(AgentModel.name == agent_name).filter(AgentModel.user_id == user_id).all()\n\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\" # should only be one result\n return results[0].to_record()\n\n @enforce_types\n def get_user(self, user_id: uuid.UUID) -> Optional[User]:\n results = self.session.query(UserModel).filter(UserModel.id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n @enforce_types\n def get_source(\n self, source_id: Optional[uuid.UUID] = None, user_id: Optional[uuid.UUID] = None, source_name: Optional[str] = None\n ) -> Optional[Source]:\n if source_id:\n results = self.session.query(SourceModel).filter(SourceModel.id == source_id).all()\n else:\n assert user_id is not None and source_name is not None\n results = self.session.query(SourceModel).filter(SourceModel.name == source_name).filter(SourceModel.user_id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n # agent source metadata\n @enforce_types\n def attach_source(self, user_id: uuid.UUID, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.add(AgentSourceMappingModel(user_id=user_id, agent_id=agent_id, source_id=source_id))\n self.session.commit()\n\n @enforce_types\n def list_attached_sources(self, agent_id: uuid.UUID) -> List[Column]:\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.agent_id == agent_id).all()\n return [r.source_id for r in results]\n\n @enforce_types\n def list_attached_agents(self, source_id: uuid.UUID):\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).all()\n return [r.agent_id for r in results]\n\n @enforce_types\n def detach_source(self, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.query(AgentSourceMappingModel).filter(\n AgentSourceMappingModel.agent_id == agent_id, 
AgentSourceMappingModel.source_id == source_id\n ).delete()\n self.session.commit()" }, { "identifier": "User", "path": "memgpt/data_types.py", "snippet": "class User:\n\n \"\"\"Defines user and default configurations\"\"\"\n\n # TODO: make sure to encrypt/decrypt keys before storing in DB\n\n def __init__(\n self,\n # name: str,\n id: Optional[uuid.UUID] = None,\n default_preset=DEFAULT_PRESET,\n default_persona=DEFAULT_PERSONA,\n default_human=DEFAULT_HUMAN,\n default_agent=None,\n # other\n policies_accepted=False,\n ):\n if id is None:\n self.id = uuid.uuid4()\n else:\n self.id = id\n assert isinstance(self.id, uuid.UUID), f\"UUID {self.id} must be a UUID type\"\n\n self.default_preset = default_preset\n self.default_persona = default_persona\n self.default_human = default_human\n self.default_agent = default_agent\n\n # misc\n self.policies_accepted = policies_accepted" } ]
import os
import uuid
import pytest
from sqlalchemy.ext.declarative import declarative_base
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.embeddings import embedding_model
from memgpt.data_types import Message, Passage, EmbeddingConfig, AgentState, OpenAIEmbeddingConfig
from memgpt.config import MemGPTConfig
from memgpt.credentials import MemGPTCredentials
from memgpt.metadata import MetadataStore
from memgpt.data_types import User
from datetime import datetime, timedelta
12,773
# Note: the database will filter out rows that do not correspond to agent1 and test_user by default. texts = ["This is a test passage", "This is another test passage", "Cinderella wept"] start_date = datetime(2009, 10, 5, 18, 00) dates = [start_date, start_date - timedelta(weeks=1), start_date + timedelta(weeks=1)] roles = ["user", "assistant", "assistant"] agent_1_id = uuid.uuid4() agent_2_id = uuid.uuid4() agent_ids = [agent_1_id, agent_2_id, agent_1_id] ids = [uuid.uuid4(), uuid.uuid4(), uuid.uuid4()] user_id = uuid.uuid4() # Data generation functions: Passages def generate_passages(embed_model): """Generate list of 3 Passage objects""" # embeddings: use openai if env is set, otherwise local passages = [] for text, _, _, agent_id, id in zip(texts, dates, roles, agent_ids, ids): embedding = None if embed_model: embedding = embed_model.get_text_embedding(text) passages.append(Passage(user_id=user_id, text=text, agent_id=agent_id, embedding=embedding, data_source="test_source", id=id)) return passages # Data generation functions: Messages def generate_messages(embed_model): """Generate list of 3 Message objects""" messages = [] for text, date, role, agent_id, id in zip(texts, dates, roles, agent_ids, ids): embedding = None if embed_model: embedding = embed_model.get_text_embedding(text) messages.append( Message(user_id=user_id, text=text, agent_id=agent_id, role=role, created_at=date, id=id, model="gpt-4", embedding=embedding) ) print(messages[-1].text) return messages @pytest.fixture(autouse=True) def clear_dynamically_created_models(): """Wipe globals for SQLAlchemy""" yield for key in list(globals().keys()): if key.endswith("Model"): del globals()[key] @pytest.fixture(autouse=True) def recreate_declarative_base(): """Recreate the declarative base before each test""" global Base Base = declarative_base() yield Base.metadata.clear() @pytest.mark.parametrize("storage_connector", ["postgres", "chroma", "sqlite"]) # @pytest.mark.parametrize("storage_connector", ["sqlite", "chroma"]) # @pytest.mark.parametrize("storage_connector", ["postgres"]) @pytest.mark.parametrize("table_type", [TableType.RECALL_MEMORY, TableType.ARCHIVAL_MEMORY]) def test_storage(storage_connector, table_type, clear_dynamically_created_models, recreate_declarative_base): # setup memgpt config # TODO: set env for different config path # hacky way to cleanup globals that scruw up tests # for table_name in ['Message']: # if 'Message' in globals(): # print("Removing messages", globals()['Message']) # del globals()['Message'] config = MemGPTConfig() if storage_connector == "postgres": if not os.getenv("PGVECTOR_TEST_DB_URL"): print("Skipping test, missing PG URI") return config.archival_storage_uri = os.getenv("PGVECTOR_TEST_DB_URL") config.recall_storage_uri = os.getenv("PGVECTOR_TEST_DB_URL") config.archival_storage_type = "postgres" config.recall_storage_type = "postgres" if storage_connector == "lancedb": # TODO: complete lancedb implementation if not os.getenv("LANCEDB_TEST_URL"): print("Skipping test, missing LanceDB URI") return config.archival_storage_uri = os.getenv("LANCEDB_TEST_URL") config.recall_storage_uri = os.getenv("LANCEDB_TEST_URL") config.archival_storage_type = "lancedb" config.recall_storage_type = "lancedb" if storage_connector == "chroma": if table_type == TableType.RECALL_MEMORY: print("Skipping test, chroma only supported for archival memory") return config.archival_storage_type = "chroma" config.archival_storage_path = "./test_chroma" if storage_connector == "sqlite": if table_type == 
TableType.ARCHIVAL_MEMORY: print("Skipping test, sqlite only supported for recall memory") return config.recall_storage_type = "sqlite" # get embedding model embed_model = None if os.getenv("OPENAI_API_KEY"):
# Note: the database will filter out rows that do not correspond to agent1 and test_user by default. texts = ["This is a test passage", "This is another test passage", "Cinderella wept"] start_date = datetime(2009, 10, 5, 18, 00) dates = [start_date, start_date - timedelta(weeks=1), start_date + timedelta(weeks=1)] roles = ["user", "assistant", "assistant"] agent_1_id = uuid.uuid4() agent_2_id = uuid.uuid4() agent_ids = [agent_1_id, agent_2_id, agent_1_id] ids = [uuid.uuid4(), uuid.uuid4(), uuid.uuid4()] user_id = uuid.uuid4() # Data generation functions: Passages def generate_passages(embed_model): """Generate list of 3 Passage objects""" # embeddings: use openai if env is set, otherwise local passages = [] for text, _, _, agent_id, id in zip(texts, dates, roles, agent_ids, ids): embedding = None if embed_model: embedding = embed_model.get_text_embedding(text) passages.append(Passage(user_id=user_id, text=text, agent_id=agent_id, embedding=embedding, data_source="test_source", id=id)) return passages # Data generation functions: Messages def generate_messages(embed_model): """Generate list of 3 Message objects""" messages = [] for text, date, role, agent_id, id in zip(texts, dates, roles, agent_ids, ids): embedding = None if embed_model: embedding = embed_model.get_text_embedding(text) messages.append( Message(user_id=user_id, text=text, agent_id=agent_id, role=role, created_at=date, id=id, model="gpt-4", embedding=embedding) ) print(messages[-1].text) return messages @pytest.fixture(autouse=True) def clear_dynamically_created_models(): """Wipe globals for SQLAlchemy""" yield for key in list(globals().keys()): if key.endswith("Model"): del globals()[key] @pytest.fixture(autouse=True) def recreate_declarative_base(): """Recreate the declarative base before each test""" global Base Base = declarative_base() yield Base.metadata.clear() @pytest.mark.parametrize("storage_connector", ["postgres", "chroma", "sqlite"]) # @pytest.mark.parametrize("storage_connector", ["sqlite", "chroma"]) # @pytest.mark.parametrize("storage_connector", ["postgres"]) @pytest.mark.parametrize("table_type", [TableType.RECALL_MEMORY, TableType.ARCHIVAL_MEMORY]) def test_storage(storage_connector, table_type, clear_dynamically_created_models, recreate_declarative_base): # setup memgpt config # TODO: set env for different config path # hacky way to cleanup globals that scruw up tests # for table_name in ['Message']: # if 'Message' in globals(): # print("Removing messages", globals()['Message']) # del globals()['Message'] config = MemGPTConfig() if storage_connector == "postgres": if not os.getenv("PGVECTOR_TEST_DB_URL"): print("Skipping test, missing PG URI") return config.archival_storage_uri = os.getenv("PGVECTOR_TEST_DB_URL") config.recall_storage_uri = os.getenv("PGVECTOR_TEST_DB_URL") config.archival_storage_type = "postgres" config.recall_storage_type = "postgres" if storage_connector == "lancedb": # TODO: complete lancedb implementation if not os.getenv("LANCEDB_TEST_URL"): print("Skipping test, missing LanceDB URI") return config.archival_storage_uri = os.getenv("LANCEDB_TEST_URL") config.recall_storage_uri = os.getenv("LANCEDB_TEST_URL") config.archival_storage_type = "lancedb" config.recall_storage_type = "lancedb" if storage_connector == "chroma": if table_type == TableType.RECALL_MEMORY: print("Skipping test, chroma only supported for archival memory") return config.archival_storage_type = "chroma" config.archival_storage_path = "./test_chroma" if storage_connector == "sqlite": if table_type == 
TableType.ARCHIVAL_MEMORY: print("Skipping test, sqlite only supported for recall memory") return config.recall_storage_type = "sqlite" # get embedding model embed_model = None if os.getenv("OPENAI_API_KEY"):
embedding_config = EmbeddingConfig(
5
2023-10-11 07:38:37+00:00
16k
xxlong0/Wonder3D
mvdiffusion/models/unet_mv2d_condition.py
[ { "identifier": "CrossAttnDownBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnDownBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n additional_residuals=None,\n ):\n output_states = ()\n\n blocks = list(zip(self.resnets, self.attentions))\n\n for i, (resnet, attn) in enumerate(blocks):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n 
hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n # apply additional residuals to the output of the last pair of resnet and attention blocks\n if i == len(blocks) - 1 and additional_residuals is not None:\n hidden_states = hidden_states + additional_residuals\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnUpBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, 
out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "UNetMidBlockMV2DCrossAttn", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class UNetMidBlockMV2DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n if not 
dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. 
Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock2D\":\n return DownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"ResnetDownsampleBlock2D\":\n return ResnetDownsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif down_block_type == \"AttnDownBlock2D\":\n if add_downsample is False:\n downsample_type = None\n else:\n downsample_type = downsample_type or \"conv\" # default to 'conv'\n return AttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n downsample_type=downsample_type,\n )\n elif down_block_type == \"CrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock2D\")\n return CrossAttnDownBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif down_block_type == \"CrossAttnDownBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlockMV2D\")\n return CrossAttnDownBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n 
sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D\")\n return SimpleCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == \"SkipDownBlock2D\":\n return SkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnSkipDownBlock2D\":\n return AttnSkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"DownEncoderBlock2D\":\n return DownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnDownEncoderBlock2D\":\n return AttnDownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"KDownBlock2D\":\n return KDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif down_block_type == \"KCrossAttnDownBlock2D\":\n return KCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n add_self_attention=True if not add_downsample else False,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n 
dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"ResnetUpsampleBlock2D\":\n return ResnetUpsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock2D\")\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif up_block_type == \"CrossAttnUpBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlockMV2D\")\n return CrossAttnUpBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n 
cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n ) \n elif up_block_type == \"SimpleCrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D\")\n return SimpleCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == \"AttnUpBlock2D\":\n if add_upsample is False:\n upsample_type = None\n else:\n upsample_type = upsample_type or \"conv\" # default to 'conv'\n\n return AttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n upsample_type=upsample_type,\n )\n elif up_block_type == \"SkipUpBlock2D\":\n return SkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"AttnSkipUpBlock2D\":\n return AttnSkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"UpDecoderBlock2D\":\n return UpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"AttnUpDecoderBlock2D\":\n return AttnUpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"KUpBlock2D\":\n return KUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif up_block_type == \"KCrossAttnUpBlock2D\":\n return KCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n 
resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n )\n\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
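The get_up_block tail shown in the context above is a plain string-to-class factory: it strips an optional "UNetRes" prefix, matches the block-type name, forwards the shared keyword arguments, and raises if nothing matches. A minimal, self-contained sketch of the same dispatch pattern, using placeholder block classes rather than the real diffusers/mvdiffusion implementations:

import torch.nn as nn


class UpBlockStub(nn.Module):
    """Placeholder standing in for an UpBlock2D-style class."""
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)


class CrossAttnUpBlockStub(UpBlockStub):
    """Placeholder standing in for a CrossAttnUpBlockMV2D-style class."""


_UP_BLOCKS = {
    "UpBlock2D": UpBlockStub,
    "CrossAttnUpBlockMV2D": CrossAttnUpBlockStub,
}


def make_up_block(up_block_type: str, **kwargs) -> nn.Module:
    # Mirror the "UNetRes" prefix stripping done by get_up_block.
    if up_block_type.startswith("UNetRes"):
        up_block_type = up_block_type[len("UNetRes"):]
    if up_block_type not in _UP_BLOCKS:
        raise ValueError(f"{up_block_type} does not exist.")
    return _UP_BLOCKS[up_block_type](**kwargs)


block = make_up_block("CrossAttnUpBlockMV2D", in_channels=640, out_channels=320)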
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin, load_state_dict, _load_state_dict_into_model from diffusers.models.unet_2d_blocks import ( CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, UpBlock2D, ) from diffusers.utils import ( CONFIG_NAME, DIFFUSERS_CACHE, FLAX_WEIGHTS_NAME, HF_HUB_OFFLINE, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, _add_variant, _get_model_file, deprecate, is_accelerate_available, is_safetensors_available, is_torch_version, logging, ) from diffusers import __version__ from mvdiffusion.models.unet_mv2d_blocks import ( CrossAttnDownBlockMV2D, CrossAttnUpBlockMV2D, UNetMidBlockMV2DCrossAttn, get_down_block, get_up_block, ) import os import torch import torch.nn as nn import torch.utils.checkpoint import copy
11,839
image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
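One detail worth noting in the constructor body above: scalar config values (num_attention_heads, cross_attention_dim, layers_per_block, and so on) are broadcast to one entry per down block so the construction loop can index them uniformly. A small self-contained sketch of that convention:

down_block_types = (
    "CrossAttnDownBlockMV2D",
    "CrossAttnDownBlockMV2D",
    "CrossAttnDownBlockMV2D",
    "DownBlock2D",
)

def per_block(value, n_blocks=len(down_block_types)):
    # Ints become uniform tuples; sequences are assumed to already hold one entry per block.
    return (value,) * n_blocks if isinstance(value, int) else tuple(value)

assert per_block(1280) == (1280, 1280, 1280, 1280)    # e.g. cross_attention_dim
assert per_block([2, 2, 2, 2]) == (2, 2, 2, 2)        # e.g. layers_per_block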
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetMV2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetMV2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. 
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlockMV2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, num_views: int = 1, cd_attention_last: bool = False, cd_attention_mid: bool = False, multiview_attention: bool = True, sparse_mv_attention: bool = False, mvcd_attention: bool = False ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." 
) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
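The "positional" time-embedding branch in the code above projects integer timesteps to sinusoidal features of width block_out_channels[0] and then maps them to time_embed_dim = block_out_channels[0] * 4 with a small MLP. A minimal sketch using the same diffusers embedding modules imported by this file (keyword names may vary slightly across diffusers versions):

import torch
from diffusers.models.embeddings import Timesteps, TimestepEmbedding

block_out_channels = (320, 640, 1280, 1280)
time_embed_dim = block_out_channels[0] * 4                     # 1280

time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(block_out_channels[0], time_embed_dim, act_fn="silu")

timesteps = torch.tensor([999, 500, 1])                        # integer diffusion timesteps
t_emb = time_embedding(time_proj(timesteps).to(torch.float32))
print(t_emb.shape)                                             # torch.Size([3, 1280])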
down_block = get_down_block(
3
2023-10-14 12:18:38+00:00
16k
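Taken together, the record above defines UNetMV2DConditionModel with defaults for every constructor argument, so a bare instantiation only needs the multiview knobs. A hedged sketch, assuming the mvdiffusion block classes referenced in the imports (CrossAttnDownBlockMV2D, CrossAttnUpBlockMV2D, UNetMidBlockMV2DCrossAttn) are importable; only construction is shown here, since the forward signature is not part of this excerpt:

import torch

# Defaults: 4 latent channels, (320, 640, 1280, 1280) block widths, cross_attention_dim=1280.
unet = UNetMV2DConditionModel(
    sample_size=32,            # 256 px images with the usual 8x VAE downsampling
    num_views=4,               # number of views attended over jointly
    multiview_attention=True,
)
latents = torch.randn(4, 4, 32, 32)   # one 4-channel latent per view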
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm_lora.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. 
time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n 
self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == 
self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" } ]
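The AspectRatioBatchSampler in the context above routes each sample into a bucket by snapping the image's height/width ratio to the closest key of a predefined aspect-ratio table, then batching within that bucket. A small self-contained sketch of that closest-ratio lookup (the ratio table here is illustrative, not the one used by PixArt):

# Illustrative ratio table: string ratio -> target (height, width).
aspect_ratios = {"0.5": (512, 1024), "1.0": (1024, 1024), "2.0": (1024, 512)}

def closest_bucket(height: int, width: int) -> str:
    ratio = height / width
    return min(aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))

assert closest_bucket(512, 1024) == "0.5"   # ratio 0.50
assert closest_bucket(800, 1000) == "1.0"   # ratio 0.80 is nearest to 1.0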
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn.functional as F import numpy as np import re import accelerate from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from packaging import version from diffusion import IDDPM from diffusion.utils.dist_utils import get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from peft import LoraConfig, get_peft_model, get_peft_model_state_dict from diffusers import AutoencoderKL, Transformer2DModel, StableDiffusionPipeline, PixArtAlphaPipeline from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
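The script imports auto_scale_lr, defined in the context above, which rescales the configured learning rate by the effective batch size relative to a base of 256, either linearly or by square root. A tiny worked sketch of that rule:

import math

def scale_lr(base_lr: float, effective_bs: int, rule: str = "linear", base_batch_size: int = 256) -> float:
    ratio = effective_bs / base_batch_size
    if rule == "sqrt":
        ratio = math.sqrt(ratio)
    return base_lr * ratio

print(scale_lr(2e-5, effective_bs=1024))               # linear: 2e-5 * 4 = 8e-05
print(scale_lr(2e-5, effective_bs=1024, rule="sqrt"))  # sqrt:   2e-5 * 2 = 4e-05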
11,110
logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) logger.info(accelerator.state) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma # prepare null_embedding for training if not os.path.exists('output/pretrained_models/null_embed.pth'): logger.info(f"Creating output/pretrained_models/null_embed.pth") os.makedirs('output/pretrained_models/', exist_ok=True) pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16, use_safetensors=True,).to("cuda") torch.save(pipe.encode_prompt(""), 'output/pretrained_models/null_embed.pth') del pipe torch.cuda.empty_cache() # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, return_startx=True) model_teacher = Transformer2DModel.from_pretrained(config.load_from, subfolder="transformer") model_teacher.requires_grad_(False) model = Transformer2DModel.from_pretrained(config.load_from, subfolder="transformer").train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):}") lora_config = LoraConfig( r=config.lora_rank, target_modules=[ "to_q", "to_k", "to_v", "to_out.0", "proj_in", "proj_out", "ff.net.0.proj", "ff.net.2", "proj", "linear", "linear_1", "linear_2", # "scale_shift_table", # not available due to the implementation in huggingface/peft, working on it. ], ) print(lora_config) model = get_peft_model(model, lora_config) model.print_trainable_parameters() # 9. Handle mixed precision and device placement # For mixed precision training we cast all non-trainable weigths to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # 11. Enable optimizations # model.enable_xformers_memory_efficient_attention() # model_teacher.enable_xformers_memory_efficient_attention() lora_layers = filter(lambda p: p.requires_grad, model.parameters()) # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # 10. 
Handle saving and loading of checkpoints # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: transformer_ = accelerator.unwrap_model(models[0]) lora_state_dict = get_peft_model_state_dict(transformer_, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(output_dir, "transformer_lora"), lora_state_dict) # save weights in peft format to be able to load them back transformer_.save_pretrained(output_dir) for _, model in enumerate(models): # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): # load the LoRA into the model transformer_ = accelerator.unwrap_model(models[0]) transformer_.load_adapter(input_dir, "default", is_trainable=True) for _ in range(len(models)): # pop models so that they are not loaded again models.pop() accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if config.grad_checkpointing: model.enable_gradient_checkpointing() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale:
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def filter_keys(key_set): def _f(dictionary): return {k: v for k, v in dictionary.items() if k in key_set} return _f def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out # Compare LCMScheduler.step, Step 4 def predicted_origin(model_output, timesteps, sample, prediction_type, alphas, sigmas): if prediction_type == "epsilon": sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) alphas = extract_into_tensor(alphas, timesteps, sample.shape) pred_x_0 = (sample - sigmas * model_output) / alphas elif prediction_type == "v_prediction": sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) alphas = extract_into_tensor(alphas, timesteps, sample.shape) pred_x_0 = alphas * sample - sigmas * model_output else: raise ValueError(f"Prediction type {prediction_type} currently not supported.") return pred_x_0 def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev def train(model): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. 
Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() global_step = start_step load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = torch.load('output/pretrained_models/null_embed.pth', map_location='cpu').to(accelerator.device).repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = (z * config.scale_factor).to(weight_dtype) y = batch[1].squeeze(1).to(weight_dtype) y_mask = batch[2].squeeze(1).squeeze(1).to(weight_dtype) data_info = {'resolution': batch[3]['img_hw'].to(weight_dtype), 'aspect_ratio': batch[3]['aspect_ratio'].to(weight_dtype),} # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses_diffusers( model, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=uncond_prompt_embeds, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses_diffusers( model, x_prev.float(), timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), skip_noise=True ) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, 
time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() accelerator.wait_for_everyone() if accelerator.is_main_process: if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") os.umask(0o000) logger.info(f"Start to save state to {save_path}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") accelerator.wait_for_everyone() if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") logger.info(f"Start to save state to {save_path}") model = accelerator.unwrap_model(model) model.save_pretrained(save_path) lora_state_dict = get_peft_model_state_dict(model, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(save_path, "transformer_lora"), lora_state_dict) logger.info(f"Saved state to {save_path}") def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument("--work-dir", default='output', help='the dir to save logs and models') parser.add_argument("--resume-from", help='the dir to save logs and models') parser.add_argument("--local-rank", type=int, default=-1) parser.add_argument("--local_rank", type=int, default=-1) parser.add_argument("--debug", action='store_true') parser.add_argument("--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) config.resume_from = None if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.resume_from = args.resume_from if args.debug: config.log_interval = 1 config.train_batch_size = 4 config.valid_num = 10 config.save_model_steps = 10 os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) logger.info(accelerator.state) 
config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma # prepare null_embedding for training if not os.path.exists('output/pretrained_models/null_embed.pth'): logger.info(f"Creating output/pretrained_models/null_embed.pth") os.makedirs('output/pretrained_models/', exist_ok=True) pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16, use_safetensors=True,).to("cuda") torch.save(pipe.encode_prompt(""), 'output/pretrained_models/null_embed.pth') del pipe torch.cuda.empty_cache() # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, return_startx=True) model_teacher = Transformer2DModel.from_pretrained(config.load_from, subfolder="transformer") model_teacher.requires_grad_(False) model = Transformer2DModel.from_pretrained(config.load_from, subfolder="transformer").train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):}") lora_config = LoraConfig( r=config.lora_rank, target_modules=[ "to_q", "to_k", "to_v", "to_out.0", "proj_in", "proj_out", "ff.net.0.proj", "ff.net.2", "proj", "linear", "linear_1", "linear_2", # "scale_shift_table", # not available due to the implementation in huggingface/peft, working on it. ], ) print(lora_config) model = get_peft_model(model, lora_config) model.print_trainable_parameters() # 9. Handle mixed precision and device placement # For mixed precision training we cast all non-trainable weigths to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # 11. Enable optimizations # model.enable_xformers_memory_efficient_attention() # model_teacher.enable_xformers_memory_efficient_attention() lora_layers = filter(lambda p: p.requires_grad, model.parameters()) # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # 10. 
Handle saving and loading of checkpoints # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: transformer_ = accelerator.unwrap_model(models[0]) lora_state_dict = get_peft_model_state_dict(transformer_, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(output_dir, "transformer_lora"), lora_state_dict) # save weights in peft format to be able to load them back transformer_.save_pretrained(output_dir) for _, model in enumerate(models): # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): # load the LoRA into the model transformer_ = accelerator.unwrap_model(models[0]) transformer_.load_adapter(input_dir, "default", is_trainable=True) for _ in range(len(models)): # pop models so that they are not loaded again models.pop() accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if config.grad_checkpointing: model.enable_gradient_checkpointing() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale:
batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset,
14
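For context on the PixArt LCM training script above: its `DDIMSolver` precomputes a coarse grid of DDIM timesteps (one every `train_sampling_steps // num_ddim_timesteps` steps) plus the matching alpha-bar values, so the teacher's estimate can be pushed back by one deterministic DDIM jump via `ddim_step`. The sketch below reproduces that index arithmetic and one such update in isolation; the linear `alpha_cumprods` schedule and all tensor shapes are toy stand-ins, not the schedule the script actually uses.

import numpy as np
import torch

timesteps, ddim_timesteps = 1000, 50
alpha_cumprods = np.linspace(0.9999, 0.01, timesteps)   # toy alpha-bar schedule

step_ratio = timesteps // ddim_timesteps                 # 20
ddim_ts = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1
print(ddim_ts[:4], ddim_ts[-1])                          # 19, 39, 59, 79, ... up to 999

ddim_alpha_prev = torch.from_numpy(
    np.asarray([alpha_cumprods[0]] + alpha_cumprods[ddim_ts[:-1]].tolist())
)

def ddim_step(pred_x0, pred_noise, timestep_index):
    # deterministic DDIM update: x_prev = sqrt(abar_prev) * x0 + sqrt(1 - abar_prev) * eps
    a_prev = ddim_alpha_prev[timestep_index].reshape(-1, 1, 1, 1)
    return a_prev.sqrt() * pred_x0 + (1.0 - a_prev).sqrt() * pred_noise

pred_x0 = torch.zeros(2, 4, 8, 8)     # pretend clean-latent estimates from the teacher
pred_noise = torch.randn(2, 4, 8, 8)  # pretend noise estimates
index = torch.tensor([10, 30])        # positions on the coarse DDIM grid
print(ddim_step(pred_x0, pred_noise, index).shape)   # torch.Size([2, 4, 8, 8])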
2023-10-12 14:16:33+00:00
16k
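One more note on the loss construction in the training loop above: the consistency target has the form `c_skip * x_prev + c_out * pred_x_0`, and `scalings_for_boundary_conditions` is built so that `c_skip` goes to 1 and `c_out` goes to 0 as the timestep goes to 0, which pins the student to the identity map at t = 0. Below is a quick numeric check of that function exactly as defined in the script (note that with this formulation the `timestep_scaling` argument is effectively unused, since the scaling is hard-coded as `timestep / 0.1`); the timestep values in the loop are arbitrary examples.

def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2)
    c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

for t in (0, 1, 10, 100, 999):
    c_skip, c_out = scalings_for_boundary_conditions(t)
    print(f"t={t:4d}  c_skip={c_skip:.6f}  c_out={c_out:.6f}")

# t=0   -> c_skip=1.0, c_out=0.0 : the consistency model must return its input at t=0
# t=999 -> c_skip~0.0, c_out~1.0 : the target is dominated by the x0 prediction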
showlab/MotionDirector
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=16)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def 
__getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool 
= False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occurred while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
11,326
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
1
2023-10-12 12:06:55+00:00
16k
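Note: the example above retrieves the full `LoraHandler` implementation as context. As a rough guide to that API, the sketch below wires it up for UNet LoRA injection. It is only an illustration: the target module names, dropout, and rank are placeholder values, the `unet` argument stands in for a loaded `UNet3DConditionModel`, and the constructor's leading `version` argument is assumed to keep its default.

# Sketch: driving the LoraHandler API shown in the snippet above.
# Placeholder values (target module names, dropout, rank) are assumptions
# for illustration, not values taken from this dataset row.
from utils.lora_handler import LoraHandler

def inject_unet_lora(unet, target_modules=("TransformerTemporalModel",), rank=16):
    """Inject fresh, trainable LoRA layers into a UNet via LoraHandler."""
    lora_manager = LoraHandler(
        use_unet_lora=True,
        unet_replace_modules=list(target_modules),
        lora_bias="none",
    )
    # An empty lora_path means no checkpoint is found, so new LoRA weights are injected.
    params, negation = lora_manager.add_lora_to_model(
        use_lora=True,
        model=unet,
        replace_modules=lora_manager.unet_replace_modules,
        dropout=0.1,
        lora_path="",
        r=rank,
    )
    return lora_manager, params, negation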
NVlabs/EmerNeRF
datasets/base/split_wrapper.py
[ { "identifier": "SceneLidarSource", "path": "datasets/base/lidar_source.py", "snippet": "class SceneLidarSource(abc.ABC):\n \"\"\"\n The base class for the lidar source of a scene.\n \"\"\"\n\n data_cfg: OmegaConf = None\n # the normalized timestamps of all points (normalized to [0, 1]), shape: (num_points,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all points, shape: (num_points,)\n _timestamps: Tensor = None\n # the timesteps of all points, shape: (num_points,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of lidar scans,\n # while timesteps are the integer timestep indices of lidar scans.\n _timesteps: Tensor = None\n # origin of each lidar point, shape: (num_points, 3)\n origins: Tensor = None\n # unit direction of each lidar point, shape: (num_points, 3)\n directions: Tensor = None\n # range of each lidar point, shape: (num_points,)\n ranges: Tensor = None\n # the transformation matrices from lidar to world coordinate system,\n lidar_to_worlds: Tensor = None\n # the indices of the lidar scans that are cached\n cached_indices: Tensor = None\n cached_origins: Tensor = None\n cached_directions: Tensor = None\n cached_ranges: Tensor = None\n cached_normalized_timestamps: Tensor = None\n\n def __init__(\n self,\n lidar_data_config: OmegaConf,\n device: torch.device = torch.device(\"cpu\"),\n ) -> None:\n # hold the config of the lidar data\n self.data_cfg = lidar_data_config\n self.device = device\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create a list of all the files in the dataset.\n e.g., a list of all the lidar scans in the dataset.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self):\n self.load_calibrations()\n self.load_lidar()\n logger.info(\"[Lidar] All Lidar Data loaded.\")\n\n def to(self, device: torch.device) -> \"SceneLidarSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.origins is not None:\n self.origins = self.origins.to(device)\n if self.directions is not None:\n self.directions = self.directions.to(device)\n if self.ranges is not None:\n self.ranges = self.ranges.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.lidar_to_worlds is not None:\n self.lidar_to_worlds = self.lidar_to_worlds.to(device)\n return self\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the calibration files of the dataset.\n e.g., lidar to world transformation matrices.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_lidar(self) -> None:\n \"\"\"\n Load the lidar data of the dataset from the filelist.\n \"\"\"\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n we assume the lidar points are already in the world coordinate system\n we first downsample the lidar points, then compute the aabb by taking the\n given percentiles of the lidar coordinates in each dimension.\n \"\"\"\n assert (\n self.origins is not None\n and self.directions is not None\n and self.ranges is not None\n ), \"Lidar points not loaded, cannot compute aabb.\"\n 
logger.info(\"[Lidar] Computing auto AABB based on downsampled lidar points....\")\n\n lidar_pts = self.origins + self.directions * self.ranges\n\n # downsample the lidar points by uniformly sampling a subset of them\n lidar_pts = lidar_pts[\n torch.randperm(len(lidar_pts))[\n : int(len(lidar_pts) / self.data_cfg.lidar_downsample_factor)\n ]\n ]\n # compute the aabb by taking the given percentiles of the lidar coordinates in each dimension\n aabb_min = torch.quantile(lidar_pts, self.data_cfg.lidar_percentile, dim=0)\n aabb_max = torch.quantile(lidar_pts, 1 - self.data_cfg.lidar_percentile, dim=0)\n del lidar_pts\n torch.cuda.empty_cache()\n\n # usually the lidar's height is very small, so we slightly increase the height of the aabb\n if aabb_max[-1] < 20:\n aabb_max[-1] = 20.0\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Lidar] Auto AABB from LiDAR: {aabb}\")\n return aabb\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of lidar timestamps in the dataset,\n usually the number of captured lidar scans.\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of each lidar timestamp,\n shape: (num_lidar_points,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of the lidar scans,\n while timesteps are the integer timestep indices of the lidar scans.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of the lidar scans.\n shape: (num_lidar_points,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of the lidar scans.\n Args:\n normalized_timestamps: the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n Note:\n we normalize the lidar timestamps together with the image timestamps,\n so that the both the lidar and image timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.size(0) == self.origins.size(\n 0\n ), \"The number of lidar points and the number of normalized timestamps must match.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - normed_timestamp)\n )\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = 
None,\n ) -> Tensor:\n \"\"\"\n Sample a batch of rays uniformly from the dataset.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n lidar_idx: the indices of the sampled lidar points.\n shape: (num_rays,)\n \"\"\"\n if candidate_indices is None:\n return torch.randint(\n 0, len(self.origins), size=(num_rays,), device=self.device\n )\n else:\n if not isinstance(candidate_indices, Tensor):\n candidate_indices = torch.tensor(candidate_indices, device=self.device)\n if self.cached_indices is None:\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n if not torch.equal(candidate_indices, self.cached_indices):\n print(\"Recomputing cached indices\")\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n random_idx = torch.randint(\n 0,\n len(self.cached_origins),\n size=(num_rays,),\n device=self.device,\n )\n return random_idx\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n lidar_idx = self.sample_uniform_rays(\n num_rays=num_rays, candidate_indices=candidate_indices\n )\n origins = self.cached_origins[lidar_idx]\n directions = self.cached_directions[lidar_idx]\n ranges = self.cached_ranges[lidar_idx]\n normalized_timestamps = self.cached_normalized_timestamps[lidar_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }\n\n def get_render_rays(self, time_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the of rays for rendering at the given timestep.\n Args:\n time_idx: the index of the lidar scan to render.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n origins = self.origins[self.timesteps == time_idx]\n directions = self.directions[self.timesteps == time_idx]\n ranges = self.ranges[self.timesteps == time_idx]\n normalized_timestamps = self.normalized_timestamps[self.timesteps == time_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }" }, { "identifier": "ScenePixelSource", "path": "datasets/base/pixel_source.py", "snippet": "class ScenePixelSource(abc.ABC):\n \"\"\"\n The 
base class for all pixel sources of a scene.\n \"\"\"\n\n # the original size of the images in the dataset\n # these values are from the waymo dataset as a placeholder\n ORIGINAL_SIZE = [[1280, 1920], [1280, 1920], [1280, 1920], [884, 1920], [884, 1920]]\n\n # define a transformation matrix to convert the opencv camera coordinate system to the dataset camera coordinate system\n OPENCV2DATASET = np.array(\n [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]\n )\n data_cfg: OmegaConf = None\n # the normalized timestamps of all images (normalized to [0, 1]), shape: (num_imgs,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all images, shape: (num_imgs,)\n _timestamps: Tensor = None\n # the timesteps of all images, shape: (num_imgs,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of images\n # while timesteps are the integer timestep indices of images.\n _timesteps: Tensor = None\n # camera ids of all images, shape: (num_imgs,)\n cam_ids: Tensor = None\n # camera-to-world matrices of all images, shape: (num_imgs, 4, 4)\n cam_to_worlds: Tensor = None\n # camera intrinsic matrices of all images, shape: (num_imgs, 3, 3)\n intrinsics: Tensor = None\n # all image tensors, shape: (num_imgs, load_size[0], load_size[1], 3)\n images: Tensor = None\n # the image ids of all images, shape: (num_imgs,)\n img_ids: Tensor = None\n # the binary masks of dynamic objects, shape: (num_imgs, load_size[0], load_size[1])\n dynamic_masks: Tensor = None\n # the binary masks of sky regions, shape: (num_imgs, load_size[0], load_size[1])\n sky_masks: Tensor = None\n # the feature tensors, shape: (num_imgs, num_patches_h, num_patches_w, C)\n features: Tensor = None\n # the pca matrix used to reduce the feature dimension to target_feature_dim,\n # shape: (original_feature_dim, target_feature_dim)\n reduce_to_target_dim_mat: Tensor = None\n # the min and max values of the reduced features used for normalization,\n # shape: (target_feature_dim,)\n feat_min: Tensor = None\n feat_max: Tensor = None\n\n # the pca matrix used to reduce the feature dimension for visualization,\n # shape: (target_feature_dim, 3)\n feat_dimension_reduction_mat: Tensor = None\n # the min and max values of the original features used for visualization,\n # shape: (3,)\n feat_color_min: Tensor = None\n feat_color_max: Tensor = None\n # the downscale factor of the features, shape: (2,)\n featmap_downscale_factor: Tuple[float, float] = None\n\n # importance sampling weights of all images,\n # shape: (num_imgs, load_size[0] // buffer_scale, load_size[1] // buffer_scale)\n pixel_error_maps: Tensor = None\n pixel_error_buffered: bool = False\n\n def __init__(\n self, pixel_data_config: OmegaConf, device: torch.device = torch.device(\"cpu\")\n ) -> None:\n # hold the config of the pixel data\n self.data_cfg = pixel_data_config\n self.device = device\n self._downscale_factor = 1 / pixel_data_config.downscale\n self._old_downscale_factor = 1 / pixel_data_config.downscale\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create file lists for all data files.\n e.g., img files, feature files, etc.\n \"\"\"\n self.img_filepaths = []\n self.feat_filepaths = []\n self.sky_mask_filepaths = []\n self.dynamic_mask_filepaths = []\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the camera intrinsics, extrinsics, timestamps, etc.\n Compute the camera-to-world matrices, ego-to-world matrices, 
etc.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self) -> None:\n \"\"\"\n A general function to load all data.\n \"\"\"\n self.load_calibrations()\n self.load_rgb()\n self.load_dynamic_mask()\n self.load_sky_mask()\n self.load_features()\n # build the pixel error buffer\n self.build_pixel_error_buffer()\n logger.info(\"[Pixel] All Pixel Data loaded.\")\n\n def to(self, device: torch.device) -> \"ScenePixelSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.images is not None:\n self.images = self.images.to(device)\n if self.dynamic_masks is not None:\n self.dynamic_masks = self.dynamic_masks.to(device)\n if self.sky_masks is not None:\n self.sky_masks = self.sky_masks.to(device)\n if self.features is not None:\n # this step can be dangerous because the features are huge\n # TODO: add a flag to control whether to move the features to GPU\n self.features = self.features.to(device)\n if self.reduce_to_target_dim_mat is not None:\n self.reduce_to_target_dim_mat = self.reduce_to_target_dim_mat.to(\n self.device\n )\n if self.feat_min is not None:\n self.feat_min = self.feat_min.to(self.device)\n self.feat_max = self.feat_max.to(self.device)\n if self.feat_dimension_reduction_mat is not None:\n self.feat_dimension_reduction_mat = (\n self.feat_dimension_reduction_mat.to(self.device)\n )\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n if self.cam_to_worlds is not None:\n self.cam_to_worlds = self.cam_to_worlds.to(device)\n if self.intrinsics is not None:\n self.intrinsics = self.intrinsics.to(device)\n if self.cam_ids is not None:\n self.cam_ids = self.cam_ids.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.pixel_error_maps is not None:\n self.pixel_error_maps = self.pixel_error_maps.to(device)\n return self\n\n def load_rgb(self) -> None:\n \"\"\"\n Load the RGB images if they are available. 
We cache the images in memory for faster loading.\n Note this can be memory consuming.\n \"\"\"\n if not self.data_cfg.load_rgb:\n return\n images = []\n for fname in tqdm(\n self.img_filepaths, desc=\"Loading images\", dynamic_ncols=True\n ):\n rgb = Image.open(fname).convert(\"RGB\")\n # resize them to the load_size\n rgb = rgb.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n images.append(rgb)\n # normalize the images to [0, 1]\n self.images = torch.from_numpy(np.stack(images, axis=0)) / 255\n self.img_ids = torch.arange(len(self.images)).long()\n\n def load_dynamic_mask(self) -> None:\n \"\"\"\n Load the dynamic masks if they are available.\n \"\"\"\n if not self.data_cfg.load_dynamic_mask:\n return\n dynamic_masks = []\n for fname in tqdm(\n self.dynamic_mask_filepaths,\n desc=\"Loading dynamic masks\",\n dynamic_ncols=True,\n ):\n dyn_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n dyn_mask = dyn_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n dynamic_masks.append(np.array(dyn_mask) > 0)\n self.dynamic_masks = torch.from_numpy(np.stack(dynamic_masks, axis=0)).float()\n\n def load_sky_mask(self) -> None:\n \"\"\"\n Load the sky masks if they are available.\n \"\"\"\n if not self.data_cfg.load_sky_mask:\n return\n sky_masks = []\n for fname in tqdm(\n self.sky_mask_filepaths, desc=\"Loading sky masks\", dynamic_ncols=True\n ):\n sky_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n sky_mask = sky_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.NEAREST\n )\n sky_masks.append(np.array(sky_mask) > 0)\n self.sky_masks = torch.from_numpy(np.stack(sky_masks, axis=0)).float()\n\n def load_features(self) -> None:\n \"\"\"\n Load the features if they are available.\n \"\"\"\n if not self.data_cfg.load_features:\n return\n\n if not self.data_cfg.skip_feature_extraction:\n logger.info(f\"Extracting {self.data_cfg.feature_model_type}...\")\n return_dict = extract_and_save_features(\n input_img_path_list=self.img_filepaths,\n saved_feat_path_list=self.feat_filepaths,\n img_shape=self.data_cfg.feature_extraction_size,\n stride=self.data_cfg.feature_extraction_stride,\n model_type=self.data_cfg.feature_model_type,\n )\n\n features = []\n for fname in tqdm(\n self.feat_filepaths, desc=\"Loading features\", dynamic_ncols=True\n ):\n # mmap_mode=\"r\" is to avoid memory overflow when loading features\n # but it only slightly helps... do we have a better way to load features?\n feature = np.load(fname, mmap_mode=\"r\").squeeze()\n features.append(feature)\n # shape: (num_imgs, num_patches_h, num_patches_w, C)\n self.features = torch.from_numpy(np.stack(features, axis=0)).float()\n # featmap_downscale_factor is used to convert the image coordinates to ViT feature coordinates.\n # resizing ViT features to (H, W) using bilinear interpolation is infeasible.\n # imagine a feature array of shape (num_timesteps x num_cams, 640, 960, 768). 
it's too large to fit in GPU memory.\n self.featmap_downscale_factor = (\n self.features.shape[1] / self.data_cfg.load_size[0],\n self.features.shape[2] / self.data_cfg.load_size[1],\n )\n logger.info(\n f\"Loaded {self.features.shape} {self.data_cfg.feature_model_type} features.\"\n )\n logger.info(f\"Feature scale: {self.featmap_downscale_factor}\")\n logger.info(f\"Computing features PCA...\")\n # compute feature visualization matrix\n C = self.features.shape[-1]\n # no need to compute PCA on the entire set of features, we randomly sample 100k features\n temp_feats = self.features.reshape(-1, C)\n max_elements_to_compute_pca = min(100000, temp_feats.shape[0])\n selected_features = temp_feats[\n np.random.choice(\n temp_feats.shape[0], max_elements_to_compute_pca, replace=False\n )\n ]\n if self.data_cfg.target_feature_dim is not None:\n logger.info(\n f\"Reducing features to {self.data_cfg.target_feature_dim} dimensions.\"\n )\n # compute PCA to reduce the feature dimension to target_feature_dim\n U, S, reduce_to_target_dim_mat = torch.pca_lowrank(\n selected_features, q=self.data_cfg.target_feature_dim, niter=20\n )\n # compute the fraction of variance explained by target_feature_dim\n variances = S**2\n fraction_var_explained = variances / variances.sum()\n logger.info(f\"[PCA] fraction_var_explained: \\n{fraction_var_explained}\")\n logger.info(\n f\"[PCA] fraction_var_explained sum: {fraction_var_explained.sum()}\",\n )\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat\n\n # reduce the features to target_feature_dim\n selected_features = selected_features @ reduce_to_target_dim_mat\n self.features = self.features @ reduce_to_target_dim_mat\n C = self.features.shape[-1]\n\n # normalize the reduced features to [0, 1] along each dimension\n feat_min = self.features.reshape(-1, C).min(dim=0)[0]\n feat_max = self.features.reshape(-1, C).max(dim=0)[0]\n self.features = (self.features - feat_min) / (feat_max - feat_min)\n selected_features = (selected_features - feat_min) / (feat_max - feat_min)\n self.feat_min = feat_min.to(self.device)\n self.feat_max = feat_max.to(self.device)\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat.to(self.device)\n # we compute the first 3 principal components of the ViT features as the color\n reduction_mat, feat_color_min, feat_color_max = get_robust_pca(\n selected_features\n )\n # final features are of shape (num_imgs, num_patches_h, num_patches_w, target_feature_dim)\n self.features = self.features\n\n # save visualization parameters\n self.feat_dimension_reduction_mat = reduction_mat\n self.feat_color_min = feat_color_min\n self.feat_color_max = feat_color_max\n del temp_feats, selected_features\n\n logger.info(\n f\"Feature PCA computed, shape: {self.feat_dimension_reduction_mat.shape}\"\n )\n\n def delete_features(self) -> None:\n \"\"\"\n Delete the features if they exist.\n This is to save disk space. 2D features of a single sequence can be 30GB+.\n \"\"\"\n delete_features(self.feat_filepaths)\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n We compute the coarse aabb by using the front camera positions / trajectories. 
We then\n extend this aabb by 40 meters along horizontal directions and 20 meters up and 5 meters\n down along vertical directions.\n \"\"\"\n assert (\n self.cam_to_worlds is not None\n ), \"Camera poses not loaded, cannot compute aabb.\"\n logger.info(\"[Pixel] Computing auto AABB based on front camera trajectory....\")\n if self.num_cams == 1:\n # if there is only one camera, it's front camera\n front_cameras_positions = self.cam_to_worlds[:, :3, 3]\n elif self.num_cams == 3:\n # if there are three cameras, they are ordered as front_left, front, front_right\n front_cameras_positions = self.cam_to_worlds[1::3, :3, 3]\n elif self.num_cams == 5:\n # if there are five cameras, they are ordered as side_left, front_left, front, front_right, side_right\n front_cameras_positions = self.cam_to_worlds[2::5, :3, 3]\n elif self.num_cams == 6:\n # if there are six cameras, they are ordered as front_left, front, front_right, back_left, back, back_right\n front_cameras_positions = self.cam_to_worlds[2::6, :3, 3]\n\n # compute the aabb\n aabb_min = front_cameras_positions.min(dim=0)[0]\n aabb_max = front_cameras_positions.max(dim=0)[0]\n\n # extend aabb by 40 meters along forward direction and 40 meters along the left/right direction\n # aabb direction: x, y, z: front, left, up\n aabb_max[0] += 40\n aabb_max[1] += 40\n # when the car is driving uphills\n aabb_max[2] = min(aabb_max[2] + 20, 20)\n\n # for waymo, there will be a lot of waste of space because we don't have images in the back,\n # it's more reasonable to extend the aabb only by a small amount, e.g., 5 meters\n # we use 40 meters here for a more general case\n aabb_min[0] -= 40\n aabb_min[1] -= 40\n # when a car is driving downhills\n aabb_min[2] = max(aabb_min[2] - 5, -5)\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Pixel] Auto AABB from camera: {aabb}\")\n return aabb\n\n def get_features(\n self,\n img_id,\n y: Tensor,\n x: Tensor,\n downscale: Union[float, Tuple[float, float]] = 1.0,\n ) -> Tensor:\n \"\"\"\n Get the features at the given pixel coordinates.\n Args:\n img_id: the image index.\n y: the vertical coordinates of the pixels, shape: (num_rays,)\n x: the horizontal coordinates of the pixels, shape: (num_rays,)\n downscale: the downscale factor of the features.\n If it's a float, we use the same downscale factor for both height and width.\n If it's a tuple, we use the first value as the downscale factor for height\n and the second value as the downscale factor for width.\n Returns:\n features: the features at the given pixel coordinates.\n shape: (num_rays, feat_dim)\n \"\"\"\n if isinstance(downscale, float):\n downscale = (downscale, downscale)\n # we compute the nearest DINO feature for each pixel\n # map (x, y) in the (W, H) space to (x * dino_scale[0], y * dino_scale[1]) in the (W//patch_size, H//patch_size) space\n dino_y = (y * downscale[0]).long()\n dino_x = (x * downscale[1]).long()\n # dino_feats are in CPU memory (because they are huge), so we need to move them to GPU\n dino_feat = self.features[img_id, dino_y.cpu(), dino_x.cpu()]\n return dino_feat\n\n def build_pixel_error_buffer(self) -> None:\n \"\"\"\n Build the pixel error buffer.\n \"\"\"\n if self.buffer_ratio > 0:\n # shape: (num_imgs, H // buffer_downscale, W // buffer_downscale)\n self.pixel_error_maps = torch.ones(\n (\n len(self.cam_to_worlds),\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n ),\n dtype=torch.float32,\n device=self.device,\n )\n logger.info(\n f\"Successfully built pixel error buffer 
(log2(num_pixels) = {np.log2(len(self.pixel_error_maps.reshape(-1))):.2f}).\"\n )\n else:\n logger.info(\"Not building pixel error buffer because buffer_ratio <= 0.\")\n\n def update_pixel_error_maps(self, render_results: Dict[str, Tensor]) -> None:\n \"\"\"\n Update the pixel error buffer with the given render results.\n \"\"\"\n if self.pixel_error_maps is None:\n logger.info(\"Skipping pixel error buffer update because it's not built.\")\n return\n gt_rgbs = render_results[\"gt_rgbs\"]\n pred_rgbs = render_results[\"rgbs\"]\n gt_rgbs = torch.from_numpy(np.stack(gt_rgbs, axis=0))\n pred_rgbs = torch.from_numpy(np.stack(pred_rgbs, axis=0))\n pixel_error_maps = torch.abs(gt_rgbs - pred_rgbs).mean(dim=-1)\n assert pixel_error_maps.shape == self.pixel_error_maps.shape\n if \"dynamic_opacities\" in render_results:\n if len(render_results[\"dynamic_opacities\"]) > 0:\n dynamic_opacity = render_results[\"dynamic_opacities\"]\n dynamic_opacity = torch.from_numpy(np.stack(dynamic_opacity, axis=0))\n # we prioritize the dynamic objects by multiplying the error by 5\n pixel_error_maps[dynamic_opacity > 0.1] *= 5\n # update the pixel error buffer\n self.pixel_error_maps: Tensor = pixel_error_maps.to(self.device)\n # normalize the pixel error buffer to [0, 1]\n self.pixel_error_maps = (\n self.pixel_error_maps - self.pixel_error_maps.min()\n ) / (self.pixel_error_maps.max() - self.pixel_error_maps.min())\n self.pixel_error_buffered = True\n logger.info(\"Successfully updated pixel error buffer\")\n\n def visualize_pixel_sample_weights(self, indices: List[int]) -> np.ndarray:\n \"\"\"\n Visualize the pixel sample weights.\n Args:\n indices: the image indices to visualize.\n Returns:\n frames: the pixel sample weights of the given image.\n shape: (len(indices) // cams, H, num_cams * W, 3)\n \"\"\"\n frames = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )[indices]\n )\n frames = [np.stack([frame, frame, frame], axis=-1) for frame in frames]\n return np.uint8(np.concatenate(frames, axis=1) * 255)\n\n def get_pixel_sample_weights_video(self) -> List[np.ndarray]:\n \"\"\"\n Get the pixel sample weights video.\n Returns:\n frames: the pixel sample weights video.\n shape: (num_imgs // cams, H, num_cams * W, 3)\n \"\"\"\n assert self.buffer_ratio > 0, \"buffer_ratio must be > 0\"\n maps = []\n loss_maps = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n )\n for i in range(self.num_imgs):\n maps.append(loss_maps[i])\n return maps\n\n def sample_important_rays(\n self, num_rays, img_candidate_indices: Tensor = None\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates from the given images based on the pixel error buffer.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n assert self.pixel_error_buffered, \"Pixel error buffer not built.\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = 
torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n sampled_indices = torch.multinomial(\n self.pixel_error_maps[img_candidate_indices].reshape(-1),\n num_rays,\n replacement=False,\n )\n # convert the sampled 1d indices to (img_idx, y, x)\n img_idx, y, x = idx_to_3d(\n sampled_indices,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n img_idx = img_candidate_indices[img_idx]\n\n # Upscale to the original resolution\n y, x = (y * self.buffer_downscale).long(), (x * self.buffer_downscale).long()\n\n # Add a random offset to avoid sampling the same pixel\n y += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n x += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n # Clamp to ensure coordinates don't exceed the image bounds\n y = torch.clamp(y, 0, self.HEIGHT - 1)\n x = torch.clamp(x, 0, self.WIDTH - 1)\n return img_idx, y, x\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n img_candidate_indices: Tensor = None,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates uniformly from the given images.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n # sample random index based on img_candidate_indices\n random_idx = torch.randint(\n 0,\n len(img_candidate_indices),\n size=(num_rays,),\n device=self.device,\n )\n img_id = img_candidate_indices[random_idx]\n\n # sample pixels\n x = torch.randint(\n 0,\n self.WIDTH,\n size=(num_rays,),\n device=self.device,\n )\n y = torch.randint(\n 0,\n self.HEIGHT,\n size=(num_rays,),\n device=self.device,\n )\n x, y = x.long(), y.long()\n return img_id, y, x\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.buffer_ratio > 0 and self.pixel_error_buffered:\n num_roi_rays = int(num_rays * self.buffer_ratio)\n num_random_rays = num_rays - num_roi_rays\n random_img_idx, random_y, random_x = self.sample_uniform_rays(\n num_random_rays, candidate_indices\n )\n roi_img_idx, roi_y, roi_x = self.sample_important_rays(\n num_roi_rays, candidate_indices\n )\n img_idx = torch.cat([random_img_idx, roi_img_idx], dim=0)\n y = torch.cat([random_y, roi_y], dim=0)\n x = torch.cat([random_x, roi_x], dim=0)\n else:\n img_idx, y, x = self.sample_uniform_rays(\n 
num_rays=num_rays, img_candidate_indices=candidate_indices\n )\n pixel_coords = torch.stack([y / self.HEIGHT, x / self.WIDTH], dim=-1)\n if self.images is not None:\n rgb = self.images[img_idx, y, x]\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx, y, x]\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx, y, x].float()\n if self.features is not None:\n features = self.get_features(\n img_idx, y, x, downscale=self.featmap_downscale_factor\n )\n if self.normalized_timestamps is not None:\n normalized_timestamps = self.normalized_timestamps[img_idx]\n if self.cam_ids is not None:\n camera_id = self.cam_ids[img_idx]\n image_id = torch.ones_like(x) * img_idx\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx]\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norms\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n def get_render_rays(self, img_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the rays for rendering the given image index.\n Args:\n img_idx: the image index.\n Returns:\n a dict containing the rays for rendering the given image index.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.images is not None:\n rgb = self.images[img_idx]\n if self.downscale_factor != 1.0:\n rgb = (\n torch.nn.functional.interpolate(\n rgb.unsqueeze(0).permute(0, 3, 1, 2),\n scale_factor=self.downscale_factor,\n mode=\"bicubic\",\n antialias=True,\n )\n .squeeze(0)\n .permute(1, 2, 0)\n )\n img_height, img_width = rgb.shape[:2]\n else:\n img_height, img_width = self.HEIGHT, self.WIDTH\n\n x, y = torch.meshgrid(\n torch.arange(img_width),\n torch.arange(img_height),\n indexing=\"xy\",\n )\n x, y = x.flatten(), y.flatten()\n x, y = x.to(self.device), y.to(self.device)\n # pixel coordinates\n pixel_coords = (\n torch.stack([y / img_height, x / img_width], dim=-1)\n .float()\n .reshape(img_height, img_width, 2)\n )\n\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx]\n if self.downscale_factor != 1.0:\n sky_mask = (\n torch.nn.functional.interpolate(\n sky_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx].float()\n if self.downscale_factor != 1.0:\n dynamic_mask = (\n torch.nn.functional.interpolate(\n dynamic_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.features is not None:\n features = self.get_features(\n img_idx,\n y,\n x,\n downscale=(\n self.featmap_downscale_factor[0] / self.downscale_factor,\n self.featmap_downscale_factor[1] / self.downscale_factor,\n ),\n ).reshape(img_height, img_width, -1)\n\n if self.normalized_timestamps is not None:\n normalized_timestamps = torch.full(\n (img_height, img_width),\n self.normalized_timestamps[img_idx],\n dtype=torch.float32,\n )\n if self.cam_ids is not None:\n camera_id = torch.full(\n (img_height, img_width),\n self.cam_ids[img_idx],\n dtype=torch.long,\n )\n image_id = 
torch.full(\n (img_height, img_width),\n img_idx,\n dtype=torch.long,\n )\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx] * self.downscale_factor\n intrinsics[2, 2] = 1.0\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n origins = origins.reshape(img_height, img_width, 3)\n viewdirs = viewdirs.reshape(img_height, img_width, 3)\n direction_norm = direction_norm.reshape(img_height, img_width, 1)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norm\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n @property\n def num_cams(self) -> int:\n \"\"\"\n Returns:\n the number of cameras in the dataset\n \"\"\"\n return self.data_cfg.num_cams\n\n @property\n def num_imgs(self) -> int:\n \"\"\"\n Returns:\n the number of images in the dataset\n \"\"\"\n return len(self.cam_to_worlds)\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of image timesteps in the dataset\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of all images,\n shape: (num_imgs,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of images\n while timesteps are the integer timestep indices of images.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of all images,\n shape: (num_imgs,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of all images\n (normalized to the range [0, 1]),\n shape: (num_imgs,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of all images.\n Args:\n normalized_timestamps: the normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_imgs,)\n Note:\n we normalize the image timestamps together with the lidar timestamps,\n so that the both the image and lidar timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.shape[0] == len(\n self.img_filepaths\n ), \"The number of normalized timestamps must match the number of images.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - 
normed_timestamp)\n )\n\n @property\n def HEIGHT(self) -> int:\n return self.data_cfg.load_size[0]\n\n @property\n def WIDTH(self) -> int:\n return self.data_cfg.load_size[1]\n\n @property\n def downscale_factor(self) -> float:\n \"\"\"\n Returns:\n downscale_factor: the downscale factor of the images\n \"\"\"\n return self._downscale_factor\n\n def update_downscale_factor(self, downscale: float) -> None:\n \"\"\"\n Args:\n downscale: the new downscale factor\n Updates the downscale factor\n \"\"\"\n self._old_downscale_factor = self._downscale_factor\n self._downscale_factor = downscale\n\n def reset_downscale_factor(self) -> None:\n \"\"\"\n Resets the downscale factor to the original value\n \"\"\"\n self._downscale_factor = self._old_downscale_factor\n\n @property\n def buffer_downscale(self) -> float:\n \"\"\"\n Returns:\n buffer_downscale: the downscale factor of the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_downscale\n\n @property\n def buffer_ratio(self) -> float:\n \"\"\"\n Returns:\n buffer_ratio: the ratio of the rays sampled from the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_ratio" } ]
from typing import List, Union from .lidar_source import SceneLidarSource from .pixel_source import ScenePixelSource import torch import torch.nn.functional as F
12,378
class SplitWrapper(torch.utils.data.Dataset): # a sufficiently large number to make sure we don't run out of data _num_iters = 1000000 def __init__( self,
class SplitWrapper(torch.utils.data.Dataset): # a sufficiently large number to make sure we don't run out of data _num_iters = 1000000 def __init__( self,
datasource: Union[ScenePixelSource, SceneLidarSource],
1
2023-10-11 20:56:27+00:00
16k
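Note: the `ScenePixelSource` snippet retrieved for the example above samples training rays in proportion to a per-pixel error buffer (`sample_important_rays`). The standalone sketch below isolates that pattern: a `torch.multinomial` draw over the flattened error maps, unflattening of the sampled indices into (image, row, column), and upscaling with a random jitter. Buffer shape, downscale factor, and ray count are toy values chosen for illustration, not taken from this dataset row.

import torch

def sample_important_pixels(error_maps: torch.Tensor, num_rays: int, buffer_downscale: int = 8):
    """Error-weighted pixel sampling in the spirit of ScenePixelSource.sample_important_rays.

    error_maps: (num_imgs, H_buf, W_buf) non-negative per-pixel errors.
    Returns (img_idx, y, x) coordinates at the upscaled image resolution.
    """
    num_imgs, h_buf, w_buf = error_maps.shape
    flat = error_maps.reshape(-1)
    # Pixels with larger error are proportionally more likely to be drawn.
    sampled = torch.multinomial(flat, num_rays, replacement=False)

    # Unflatten 1-D indices into (image, row, column), as idx_to_3d does in the snippet.
    img_idx = sampled // (h_buf * w_buf)
    rem = sampled % (h_buf * w_buf)
    y, x = rem // w_buf, rem % w_buf

    # Upscale to image resolution and jitter inside each buffer cell so the same
    # low-resolution cell does not always map to the same pixel.
    y = y * buffer_downscale + torch.randint(0, buffer_downscale, (num_rays,))
    x = x * buffer_downscale + torch.randint(0, buffer_downscale, (num_rays,))
    return img_idx, y, x

# Toy usage: 4 images with a 32x48 error buffer, draw 1024 rays.
img_idx, y, x = sample_important_pixels(torch.rand(4, 32, 48), num_rays=1024)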
alibaba-damo-academy/FunCodec
funcodec/models/decoder/contextual_decoder.py
[ { "identifier": "utils", "path": "funcodec/modules/streaming_utils/utils.py", "snippet": "def sequence_mask(lengths, maxlen=None, dtype=torch.float32, device=None):\ndef apply_cmvn(inputs, mvn):\ndef drop_and_add(inputs: torch.Tensor,\n outputs: torch.Tensor,\n training: bool,\n dropout_rate: float = 0.1,\n stoch_layer_coeff: float = 1.0):\ndef proc_tf_vocab(vocab_path):\ndef gen_config_for_tfmodel(config_path, vocab_path, output_dir):\n\tdef ignore_aliases(self, data):\ndef yaml_no_alias_safe_dump(data, stream=None, **kwargs):\nclass NoAliasSafeDumper(yaml.SafeDumper):" }, { "identifier": "BaseTransformerDecoder", "path": "funcodec/models/decoder/transformer_decoder.py", "snippet": "class BaseTransformerDecoder(AbsDecoder, BatchScorerInterface):\n \"\"\"Base class of Transfomer decoder module.\n\n Args:\n vocab_size: output dim\n encoder_output_size: dimension of attention\n attention_heads: the number of heads of multi head attention\n linear_units: the number of units of position-wise feed forward\n num_blocks: the number of decoder blocks\n dropout_rate: dropout rate\n self_attention_dropout_rate: dropout rate for attention\n input_layer: input layer type\n use_output_layer: whether to use output layer\n pos_enc_class: PositionalEncoding or ScaledPositionalEncoding\n normalize_before: whether to use layer_norm before the first block\n concat_after: whether to concat attention layer's input and output\n if True, additional linear will be applied.\n i.e. x -> x + linear(concat(x, att(x)))\n if False, no additional linear will be applied.\n i.e. x -> x + att(x)\n \"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n encoder_output_size: int,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n input_layer: str = \"embed\",\n use_output_layer: bool = True,\n pos_enc_class=PositionalEncoding,\n normalize_before: bool = True,\n causal=True,\n ):\n assert check_argument_types()\n super().__init__()\n attention_dim = encoder_output_size\n self.causal = causal\n\n if input_layer == \"embed\":\n self.embed = torch.nn.Sequential(\n torch.nn.Embedding(vocab_size, attention_dim),\n pos_enc_class(attention_dim, positional_dropout_rate),\n )\n elif input_layer == \"linear\":\n self.embed = torch.nn.Sequential(\n torch.nn.Linear(vocab_size, attention_dim),\n torch.nn.LayerNorm(attention_dim),\n torch.nn.Dropout(dropout_rate),\n torch.nn.ReLU(),\n pos_enc_class(attention_dim, positional_dropout_rate),\n )\n else:\n raise ValueError(f\"only 'embed' or 'linear' is supported: {input_layer}\")\n\n self.normalize_before = normalize_before\n if self.normalize_before:\n self.after_norm = LayerNorm(attention_dim)\n if use_output_layer:\n self.output_layer = torch.nn.Linear(attention_dim, vocab_size)\n else:\n self.output_layer = None\n\n # Must set by the inheritance\n self.decoders = None\n\n def forward(\n self,\n hs_pad: torch.Tensor,\n hlens: torch.Tensor,\n ys_in_pad: torch.Tensor,\n ys_in_lens: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward decoder.\n\n Args:\n hs_pad: encoded memory, float32 (batch, maxlen_in, feat)\n hlens: (batch)\n ys_in_pad:\n input token ids, int64 (batch, maxlen_out)\n if input_layer == \"embed\"\n input tensor (batch, maxlen_out, #mels) in the other cases\n ys_in_lens: (batch)\n Returns:\n (tuple): tuple containing:\n\n x: decoded token score before softmax (batch, maxlen_out, token)\n if use_output_layer is True,\n olens: (batch, )\n \"\"\"\n tgt = ys_in_pad\n # tgt_mask: (B, 1, L)\n tgt_mask = (~make_pad_mask(ys_in_lens)[:, None, 
:]).to(tgt.device)\n if self.causal:\n # m: (1, L, L)\n m = subsequent_mask(tgt_mask.size(-1), device=tgt_mask.device).unsqueeze(0)\n # tgt_mask: (B, L, L)\n tgt_mask = tgt_mask & m\n\n memory = hs_pad\n memory_mask = (~make_pad_mask(hlens, maxlen=memory.size(1)))[:, None, :].to(\n memory.device\n )\n # Padding for Longformer\n if memory_mask.shape[-1] != memory.shape[1]:\n padlen = memory.shape[1] - memory_mask.shape[-1]\n memory_mask = torch.nn.functional.pad(\n memory_mask, (0, padlen), \"constant\", False\n )\n\n x = self.embed(tgt)\n x, tgt_mask, memory, memory_mask = self.decoders(\n x, tgt_mask, memory, memory_mask\n )\n if self.normalize_before:\n x = self.after_norm(x)\n if self.output_layer is not None:\n x = self.output_layer(x)\n\n olens = tgt_mask.sum(1)\n return x, olens\n\n def forward_one_step(\n self,\n tgt: torch.Tensor,\n tgt_mask: torch.Tensor,\n memory: torch.Tensor,\n cache: List[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n \"\"\"Forward one step.\n\n Args:\n tgt: input token ids, int64 (batch, maxlen_out)\n tgt_mask: input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n memory: encoded memory, float32 (batch, maxlen_in, feat)\n cache: cached output list of (batch, max_time_out-1, size)\n Returns:\n y, cache: NN output value and cache per `self.decoders`.\n y.shape` is (batch, maxlen_out, token)\n \"\"\"\n x = self.embed(tgt)\n if cache is None:\n cache = [None] * len(self.decoders)\n new_cache = []\n for c, decoder in zip(cache, self.decoders):\n x, tgt_mask, memory, memory_mask = decoder(\n x, tgt_mask, memory, None, cache=c\n )\n new_cache.append(x)\n\n if self.normalize_before:\n y = self.after_norm(x[:, -1])\n else:\n y = x[:, -1]\n if self.output_layer is not None:\n y = torch.log_softmax(self.output_layer(y), dim=-1)\n\n return y, new_cache\n\n def score(self, ys, state, x):\n \"\"\"Score.\"\"\"\n ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)\n logp, state = self.forward_one_step(\n ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state\n )\n return logp.squeeze(0), state\n\n def batch_score(\n self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor\n ) -> Tuple[torch.Tensor, List[Any]]:\n \"\"\"Score new token batch.\n\n Args:\n ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).\n states (List[Any]): Scorer states for prefix tokens.\n xs (torch.Tensor):\n The encoder feature that generates ys (n_batch, xlen, n_feat).\n\n Returns:\n tuple[torch.Tensor, List[Any]]: Tuple of\n batchfied scores for next token with shape of `(n_batch, n_vocab)`\n and next state list for ys.\n\n \"\"\"\n # merge states\n n_batch = len(ys)\n n_layers = len(self.decoders)\n if states[0] is None:\n batch_state = None\n else:\n # transpose state of [batch, layer] into [layer, batch]\n batch_state = [\n torch.stack([states[b][i] for b in range(n_batch)])\n for i in range(n_layers)\n ]\n\n # batch decoding\n ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)\n logp, states = self.forward_one_step(ys, ys_mask, xs, cache=batch_state)\n\n # transpose state of [layer, batch] into [batch, layer]\n state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]\n return logp, state_list" }, { "identifier": "MultiHeadedAttentionSANMDecoder", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttentionSANMDecoder(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat 
(int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_feat, dropout_rate, kernel_size, sanm_shfit=0):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttentionSANMDecoder, self).__init__()\n\n self.dropout = nn.Dropout(p=dropout_rate)\n\n self.fsmn_block = nn.Conv1d(n_feat, n_feat,\n kernel_size, stride=1, padding=0, groups=n_feat, bias=False)\n # padding\n # padding\n left_padding = (kernel_size - 1) // 2\n if sanm_shfit > 0:\n left_padding = left_padding + sanm_shfit\n right_padding = kernel_size - 1 - left_padding\n self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)\n self.kernel_size = kernel_size\n\n def forward(self, inputs, mask, cache=None, mask_shfit_chunk=None):\n '''\n :param x: (#batch, time1, size).\n :param mask: Mask tensor (#batch, 1, time)\n :return:\n '''\n # print(\"in fsmn, inputs\", inputs.size())\n b, t, d = inputs.size()\n # logging.info(\n # \"mask: {}\".format(mask.size()))\n if mask is not None:\n mask = torch.reshape(mask, (b ,-1, 1))\n # logging.info(\"in fsmn, mask: {}, {}\".format(mask.size(), mask[0:100:50, :, :]))\n if mask_shfit_chunk is not None:\n # logging.info(\"in fsmn, mask_fsmn: {}, {}\".format(mask_shfit_chunk.size(), mask_shfit_chunk[0:100:50, :, :]))\n mask = mask * mask_shfit_chunk\n # logging.info(\"in fsmn, mask_after_fsmn: {}, {}\".format(mask.size(), mask[0:100:50, :, :]))\n # print(\"in fsmn, mask\", mask.size())\n # print(\"in fsmn, inputs\", inputs.size())\n inputs = inputs * mask\n\n x = inputs.transpose(1, 2)\n b, d, t = x.size()\n if cache is None:\n # print(\"in fsmn, cache is None, x\", x.size())\n\n x = self.pad_fn(x)\n if not self.training and t <= 1:\n cache = x\n else:\n # print(\"in fsmn, cache is not None, x\", x.size())\n # x = torch.cat((x, cache), dim=2)[:, :, :-1]\n # if t < self.kernel_size:\n # x = self.pad_fn(x)\n x = torch.cat((cache[:, :, 1:], x), dim=2)\n x = x[:, :, -self.kernel_size:]\n # print(\"in fsmn, cache is not None, x_cat\", x.size())\n cache = x\n x = self.fsmn_block(x)\n x = x.transpose(1, 2)\n # print(\"in fsmn, fsmn_out\", x.size())\n if x.size(1) != inputs.size(1):\n inputs = inputs[:, -1, :]\n\n x = x + inputs\n x = self.dropout(x)\n if mask is not None:\n x = x * mask\n return x, cache" }, { "identifier": "MultiHeadedAttentionCrossAtt", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttentionCrossAtt(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, encoder_output_size=None):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttentionCrossAtt, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n # self.linear_k = nn.Linear(n_feat, n_feat)\n # self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_k_v = nn.Linear(n_feat if encoder_output_size is None else encoder_output_size, n_feat*2)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(self, x, memory):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n 
Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n\n # print(\"in forward_qkv, x\", x.size())\n b = x.size(0)\n q = self.linear_q(x)\n q_h = torch.reshape(q, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time1, d_k)\n\n k_v = self.linear_k_v(memory)\n k, v = torch.split(k_v, int(self.h*self.d_k), dim=-1)\n k_h = torch.reshape(k, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)\n v_h = torch.reshape(v, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)\n\n\n return q_h, k_h, v_h\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n # logging.info(\n # \"scores: {}, mask_size: {}\".format(scores.size(), mask.size()))\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, x, memory, memory_mask):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q_h, k_h, v_h = self.forward_qkv(x, memory)\n q_h = q_h * self.d_k ** (-0.5)\n scores = torch.matmul(q_h, k_h.transpose(-2, -1))\n return self.forward_attention(v_h, scores, memory_mask)" }, { "identifier": "PositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n reverse (bool): Whether to reverse the input position. Only for\n the class LegacyRelPositionalEncoding. 
We remove it in the current\n class RelPositionalEncoding.\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super(PositionalEncoding, self).__init__()\n self.d_model = d_model\n self.reverse = reverse\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\n self._register_load_state_dict_pre_hook(_pre_hook)\n\n def extend_pe(self, x):\n \"\"\"Reset the positional encodings.\"\"\"\n if self.pe is not None:\n if self.pe.size(1) >= x.size(1):\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n pe = torch.zeros(x.size(1), self.d_model)\n if self.reverse:\n position = torch.arange(\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\n ).unsqueeze(1)\n else:\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.pe = pe.to(device=x.device, dtype=x.dtype)\n\n def forward(self, x: torch.Tensor):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale + self.pe[:, : x.size(1)]\n return self.dropout(x)" }, { "identifier": "LayerNorm", "path": "funcodec/modules/layer_norm.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\n \"\"\"Layer normalization module.\n\n Args:\n nout (int): Output dim size.\n dim (int): Dimension to be normalized.\n\n \"\"\"\n\n def __init__(self, nout, dim=-1):\n \"\"\"Construct an LayerNorm object.\"\"\"\n super(LayerNorm, self).__init__(nout, eps=1e-12)\n self.dim = dim\n\n def forward(self, x):\n \"\"\"Apply layer normalization.\n\n Args:\n x (torch.Tensor): Input tensor.\n\n Returns:\n torch.Tensor: Normalized tensor.\n\n \"\"\"\n if self.dim == -1:\n return super(LayerNorm, self).forward(x)\n return (\n super(LayerNorm, self)\n .forward(x.transpose(self.dim, -1))\n .transpose(self.dim, -1)\n )" }, { "identifier": "PositionwiseFeedForwardDecoderSANM", "path": "funcodec/modules/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForwardDecoderSANM(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, idim, hidden_units, dropout_rate, adim=None, activation=torch.nn.ReLU()):\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForwardDecoderSANM, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.w_2 = torch.nn.Linear(hidden_units, idim if adim is None else adim, bias=False)\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.activation = activation\n self.norm = LayerNorm(hidden_units)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n return self.w_2(self.norm(self.dropout(self.activation(self.w_1(x)))))" }, { "identifier": "repeat", "path": "funcodec/modules/repeat.py", "snippet": "def repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: 
Repeated model instance.\n\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])" }, { "identifier": "DecoderLayerSANM", "path": "funcodec/models/decoder/sanm_decoder.py", "snippet": "class DecoderLayerSANM(nn.Module):\n \"\"\"Single decoder layer module.\n\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` instance can be used as the argument.\n src_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\n can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool): Whether to use layer_norm before the first block.\n concat_after (bool): Whether to concat attention layer's input and output.\n if True, additional linear will be applied.\n i.e. x -> x + linear(concat(x, att(x)))\n if False, no additional linear will be applied. i.e. x -> x + att(x)\n\n\n \"\"\"\n\n def __init__(\n self,\n size,\n self_attn,\n src_attn,\n feed_forward,\n dropout_rate,\n normalize_before=True,\n concat_after=False,\n ):\n \"\"\"Construct an DecoderLayer object.\"\"\"\n super(DecoderLayerSANM, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.norm1 = LayerNorm(size)\n if self_attn is not None:\n self.norm2 = LayerNorm(size)\n if src_attn is not None:\n self.norm3 = LayerNorm(size)\n self.dropout = nn.Dropout(dropout_rate)\n self.normalize_before = normalize_before\n self.concat_after = concat_after\n if self.concat_after:\n self.concat_linear1 = nn.Linear(size + size, size)\n self.concat_linear2 = nn.Linear(size + size, size)\n\n def forward(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):\n \"\"\"Compute decoded features.\n\n Args:\n tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).\n tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).\n memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).\n memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).\n cache (List[torch.Tensor]): List of cached tensors.\n Each tensor shape should be (#batch, maxlen_out - 1, size).\n\n Returns:\n torch.Tensor: Output tensor(#batch, maxlen_out, size).\n torch.Tensor: Mask for output tensor (#batch, maxlen_out).\n torch.Tensor: Encoded memory (#batch, maxlen_in, size).\n torch.Tensor: Encoded memory mask (#batch, maxlen_in).\n\n \"\"\"\n # tgt = self.dropout(tgt)\n residual = tgt\n if self.normalize_before:\n tgt = self.norm1(tgt)\n tgt = self.feed_forward(tgt)\n\n x = tgt\n if self.self_attn:\n if self.normalize_before:\n tgt = self.norm2(tgt)\n if self.training:\n cache = None\n x, cache = self.self_attn(tgt, tgt_mask, cache=cache)\n x = residual + self.dropout(x)\n\n if self.src_attn is not None:\n residual = x\n if self.normalize_before:\n x = self.norm3(x)\n\n x = residual + self.dropout(self.src_attn(x, memory, memory_mask))\n\n\n return x, tgt_mask, memory, memory_mask, cache" }, { "identifier": "ParaformerSANMDecoder", "path": "funcodec/models/decoder/sanm_decoder.py", "snippet": "class ParaformerSANMDecoder(BaseTransformerDecoder):\n \"\"\"\n author: Speech Lab, Alibaba Group, China\n Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition\n https://arxiv.org/abs/2006.01713\n \"\"\"\n def __init__(\n 
self,\n vocab_size: int,\n encoder_output_size: int,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n self_attention_dropout_rate: float = 0.0,\n src_attention_dropout_rate: float = 0.0,\n input_layer: str = \"embed\",\n use_output_layer: bool = True,\n pos_enc_class=PositionalEncoding,\n normalize_before: bool = True,\n concat_after: bool = False,\n att_layer_num: int = 6,\n kernel_size: int = 21,\n sanm_shfit: int = 0,\n tf2torch_tensor_name_prefix_torch: str = \"decoder\",\n tf2torch_tensor_name_prefix_tf: str = \"seq2seq/decoder\",\n ):\n assert check_argument_types()\n super().__init__(\n vocab_size=vocab_size,\n encoder_output_size=encoder_output_size,\n dropout_rate=dropout_rate,\n positional_dropout_rate=positional_dropout_rate,\n input_layer=input_layer,\n use_output_layer=use_output_layer,\n pos_enc_class=pos_enc_class,\n normalize_before=normalize_before,\n )\n\n attention_dim = encoder_output_size\n\n if input_layer == \"embed\":\n self.embed = torch.nn.Sequential(\n torch.nn.Embedding(vocab_size, attention_dim),\n # pos_enc_class(attention_dim, positional_dropout_rate),\n )\n elif input_layer == \"linear\":\n self.embed = torch.nn.Sequential(\n torch.nn.Linear(vocab_size, attention_dim),\n torch.nn.LayerNorm(attention_dim),\n torch.nn.Dropout(dropout_rate),\n torch.nn.ReLU(),\n pos_enc_class(attention_dim, positional_dropout_rate),\n )\n else:\n raise ValueError(f\"only 'embed' or 'linear' is supported: {input_layer}\")\n\n self.normalize_before = normalize_before\n if self.normalize_before:\n self.after_norm = LayerNorm(attention_dim)\n if use_output_layer:\n self.output_layer = torch.nn.Linear(attention_dim, vocab_size)\n else:\n self.output_layer = None\n\n self.att_layer_num = att_layer_num\n self.num_blocks = num_blocks\n if sanm_shfit is None:\n sanm_shfit = (kernel_size - 1) // 2\n self.decoders = repeat(\n att_layer_num,\n lambda lnum: DecoderLayerSANM(\n attention_dim,\n MultiHeadedAttentionSANMDecoder(\n attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit\n ),\n MultiHeadedAttentionCrossAtt(\n attention_heads, attention_dim, src_attention_dropout_rate\n ),\n PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n concat_after,\n ),\n )\n if num_blocks - att_layer_num <= 0:\n self.decoders2 = None\n else:\n self.decoders2 = repeat(\n num_blocks - att_layer_num,\n lambda lnum: DecoderLayerSANM(\n attention_dim,\n MultiHeadedAttentionSANMDecoder(\n attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=0\n ),\n None,\n PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n concat_after,\n ),\n )\n\n self.decoders3 = repeat(\n 1,\n lambda lnum: DecoderLayerSANM(\n attention_dim,\n None,\n None,\n PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n concat_after,\n ),\n )\n self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch\n self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf\n\n def forward(\n self,\n hs_pad: torch.Tensor,\n hlens: torch.Tensor,\n ys_in_pad: torch.Tensor,\n ys_in_lens: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward decoder.\n\n Args:\n hs_pad: encoded memory, float32 (batch, maxlen_in, feat)\n hlens: (batch)\n ys_in_pad:\n input token ids, int64 (batch, maxlen_out)\n 
if input_layer == \"embed\"\n input tensor (batch, maxlen_out, #mels) in the other cases\n ys_in_lens: (batch)\n Returns:\n (tuple): tuple containing:\n\n x: decoded token score before softmax (batch, maxlen_out, token)\n if use_output_layer is True,\n olens: (batch, )\n \"\"\"\n tgt = ys_in_pad\n tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]\n\n memory = hs_pad\n memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]\n\n x = tgt\n x, tgt_mask, memory, memory_mask, _ = self.decoders(\n x, tgt_mask, memory, memory_mask\n )\n if self.decoders2 is not None:\n x, tgt_mask, memory, memory_mask, _ = self.decoders2(\n x, tgt_mask, memory, memory_mask\n )\n x, tgt_mask, memory, memory_mask, _ = self.decoders3(\n x, tgt_mask, memory, memory_mask\n )\n if self.normalize_before:\n x = self.after_norm(x)\n if self.output_layer is not None:\n x = self.output_layer(x)\n\n olens = tgt_mask.sum(1)\n return x, olens\n\n def score(self, ys, state, x):\n \"\"\"Score.\"\"\"\n ys_mask = myutils.sequence_mask(torch.tensor([len(ys)], dtype=torch.int32), device=x.device)[:, :, None]\n logp, state = self.forward_one_step(\n ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state\n )\n return logp.squeeze(0), state\n\n def forward_one_step(\n self,\n tgt: torch.Tensor,\n tgt_mask: torch.Tensor,\n memory: torch.Tensor,\n cache: List[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n \"\"\"Forward one step.\n\n Args:\n tgt: input token ids, int64 (batch, maxlen_out)\n tgt_mask: input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n memory: encoded memory, float32 (batch, maxlen_in, feat)\n cache: cached output list of (batch, max_time_out-1, size)\n Returns:\n y, cache: NN output value and cache per `self.decoders`.\n y.shape` is (batch, maxlen_out, token)\n \"\"\"\n x = self.embed(tgt)\n if cache is None:\n cache_layer_num = len(self.decoders)\n if self.decoders2 is not None:\n cache_layer_num += len(self.decoders2)\n cache = [None] * cache_layer_num\n new_cache = []\n # for c, decoder in zip(cache, self.decoders):\n for i in range(self.att_layer_num):\n decoder = self.decoders[i]\n c = cache[i]\n x, tgt_mask, memory, memory_mask, c_ret = decoder(\n x, tgt_mask, memory, None, cache=c\n )\n new_cache.append(c_ret)\n\n if self.num_blocks - self.att_layer_num > 1:\n for i in range(self.num_blocks - self.att_layer_num):\n j = i + self.att_layer_num\n decoder = self.decoders2[i]\n c = cache[j]\n x, tgt_mask, memory, memory_mask, c_ret = decoder(\n x, tgt_mask, memory, None, cache=c\n )\n new_cache.append(c_ret)\n\n for decoder in self.decoders3:\n\n x, tgt_mask, memory, memory_mask, _ = decoder(\n x, tgt_mask, memory, None, cache=None\n )\n\n if self.normalize_before:\n y = self.after_norm(x[:, -1])\n else:\n y = x[:, -1]\n if self.output_layer is not None:\n y = torch.log_softmax(self.output_layer(y), dim=-1)\n\n return y, new_cache\n\n def gen_tf2torch_map_dict(self):\n \n tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch\n tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf\n map_dict_local = {\n \n ## decoder\n # ffn\n \"{}.decoders.layeridx.norm1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.norm1.bias\".format(tensor_name_prefix_torch):\n {\"name\": 
\"{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.feed_forward.w_1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (1024,256),(1,256,1024)\n \"{}.decoders.layeridx.feed_forward.w_1.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders.layeridx.feed_forward.norm.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders.layeridx.feed_forward.norm.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders.layeridx.feed_forward.w_2.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d_1/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (256,1024),(1,1024,256)\n \n # fsmn\n \"{}.decoders.layeridx.norm2.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/gamma\".format(\n tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.norm2.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/beta\".format(\n tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.self_attn.fsmn_block.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/decoder_memory_block/depth_conv_w\".format(\n tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 2, 0),\n }, # (256,1,31),(1,31,256,1)\n # src att\n \"{}.decoders.layeridx.norm3.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.norm3.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.src_attn.linear_q.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (256,256),(1,256,256)\n \"{}.decoders.layeridx.src_attn.linear_q.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders.layeridx.src_attn.linear_k_v.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (1024,256),(1,256,1024)\n 
\"{}.decoders.layeridx.src_attn.linear_k_v.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders.layeridx.src_attn.linear_out.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (256,256),(1,256,256)\n \"{}.decoders.layeridx.src_attn.linear_out.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n # dnn\n \"{}.decoders3.layeridx.norm1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/LayerNorm/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders3.layeridx.norm1.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/LayerNorm/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.decoders3.layeridx.feed_forward.w_1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/conv1d/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (1024,256),(1,256,1024)\n \"{}.decoders3.layeridx.feed_forward.w_1.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/conv1d/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders3.layeridx.feed_forward.norm.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/LayerNorm_1/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders3.layeridx.feed_forward.norm.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/LayerNorm_1/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.decoders3.layeridx.feed_forward.w_2.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/decoder_dnn_layer_layeridx/conv1d_1/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (256,1024),(1,1024,256)\n \n # embed_concat_ffn\n \"{}.embed_concat_ffn.layeridx.norm1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/LayerNorm/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.embed_concat_ffn.layeridx.norm1.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/LayerNorm/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.embed_concat_ffn.layeridx.feed_forward.w_1.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/conv1d/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (1024,256),(1,256,1024)\n \"{}.embed_concat_ffn.layeridx.feed_forward.w_1.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/conv1d/bias\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.embed_concat_ffn.layeridx.feed_forward.norm.weight\".format(tensor_name_prefix_torch):\n {\"name\": 
\"{}/cif_concat/LayerNorm_1/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.embed_concat_ffn.layeridx.feed_forward.norm.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/LayerNorm_1/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (1024,),(1024,)\n \"{}.embed_concat_ffn.layeridx.feed_forward.w_2.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/cif_concat/conv1d_1/kernel\".format(tensor_name_prefix_tf),\n \"squeeze\": 0,\n \"transpose\": (1, 0),\n }, # (256,1024),(1,1024,256)\n \n # out norm\n \"{}.after_norm.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/LayerNorm/gamma\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \"{}.after_norm.bias\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/LayerNorm/beta\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (256,),(256,)\n \n # in embed\n \"{}.embed.0.weight\".format(tensor_name_prefix_torch):\n {\"name\": \"{}/w_embs\".format(tensor_name_prefix_tf),\n \"squeeze\": None,\n \"transpose\": None,\n }, # (4235,256),(4235,256)\n \n # out layer\n \"{}.output_layer.weight\".format(tensor_name_prefix_torch):\n {\"name\": [\"{}/dense/kernel\".format(tensor_name_prefix_tf), \"{}/w_embs\".format(tensor_name_prefix_tf)],\n \"squeeze\": [None, None],\n \"transpose\": [(1, 0), None],\n }, # (4235,256),(256,4235)\n \"{}.output_layer.bias\".format(tensor_name_prefix_torch):\n {\"name\": [\"{}/dense/bias\".format(tensor_name_prefix_tf),\n \"seq2seq/2bias\" if tensor_name_prefix_tf == \"seq2seq/decoder/inputter_1\" else \"seq2seq/bias\"],\n \"squeeze\": [None, None],\n \"transpose\": [None, None],\n }, # (4235,),(4235,)\n \n }\n return map_dict_local\n\n def convert_tf2torch(self,\n var_dict_tf,\n var_dict_torch,\n ):\n map_dict = self.gen_tf2torch_map_dict()\n var_dict_torch_update = dict()\n decoder_layeridx_sets = set()\n for name in sorted(var_dict_torch.keys(), reverse=False):\n names = name.split('.')\n if names[0] == self.tf2torch_tensor_name_prefix_torch:\n if names[1] == \"decoders\":\n layeridx = int(names[2])\n name_q = name.replace(\".{}.\".format(layeridx), \".layeridx.\")\n layeridx_bias = 0\n layeridx += layeridx_bias\n decoder_layeridx_sets.add(layeridx)\n if name_q in map_dict.keys():\n name_v = map_dict[name_q][\"name\"]\n name_tf = name_v.replace(\"layeridx\", \"{}\".format(layeridx))\n data_tf = var_dict_tf[name_tf]\n if map_dict[name_q][\"squeeze\"] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name_q][\"squeeze\"])\n if map_dict[name_q][\"transpose\"] is not None:\n data_tf = np.transpose(data_tf, map_dict[name_q][\"transpose\"])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_v,\n var_dict_tf[name_tf].shape))\n \n elif names[1] == \"decoders2\":\n layeridx = int(names[2])\n name_q = name.replace(\".{}.\".format(layeridx), \".layeridx.\")\n name_q = name_q.replace(\"decoders2\", \"decoders\")\n layeridx_bias = len(decoder_layeridx_sets)\n \n layeridx += layeridx_bias\n if \"decoders.\" in name:\n decoder_layeridx_sets.add(layeridx)\n if name_q in map_dict.keys():\n name_v = 
map_dict[name_q][\"name\"]\n name_tf = name_v.replace(\"layeridx\", \"{}\".format(layeridx))\n data_tf = var_dict_tf[name_tf]\n if map_dict[name_q][\"squeeze\"] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name_q][\"squeeze\"])\n if map_dict[name_q][\"transpose\"] is not None:\n data_tf = np.transpose(data_tf, map_dict[name_q][\"transpose\"])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_v,\n var_dict_tf[name_tf].shape))\n \n elif names[1] == \"decoders3\":\n layeridx = int(names[2])\n name_q = name.replace(\".{}.\".format(layeridx), \".layeridx.\")\n \n layeridx_bias = 0\n layeridx += layeridx_bias\n if \"decoders.\" in name:\n decoder_layeridx_sets.add(layeridx)\n if name_q in map_dict.keys():\n name_v = map_dict[name_q][\"name\"]\n name_tf = name_v.replace(\"layeridx\", \"{}\".format(layeridx))\n data_tf = var_dict_tf[name_tf]\n if map_dict[name_q][\"squeeze\"] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name_q][\"squeeze\"])\n if map_dict[name_q][\"transpose\"] is not None:\n data_tf = np.transpose(data_tf, map_dict[name_q][\"transpose\"])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_v,\n var_dict_tf[name_tf].shape))\n \n elif names[1] == \"embed\" or names[1] == \"output_layer\":\n name_tf = map_dict[name][\"name\"]\n if isinstance(name_tf, list):\n idx_list = 0\n if name_tf[idx_list] in var_dict_tf.keys():\n pass\n else:\n idx_list = 1\n data_tf = var_dict_tf[name_tf[idx_list]]\n if map_dict[name][\"squeeze\"][idx_list] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name][\"squeeze\"][idx_list])\n if map_dict[name][\"transpose\"][idx_list] is not None:\n data_tf = np.transpose(data_tf, map_dict[name][\"transpose\"][idx_list])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(),\n name_tf[idx_list],\n var_dict_tf[name_tf[\n idx_list]].shape))\n \n else:\n data_tf = var_dict_tf[name_tf]\n if map_dict[name][\"squeeze\"] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name][\"squeeze\"])\n if map_dict[name][\"transpose\"] is not None:\n data_tf = np.transpose(data_tf, map_dict[name][\"transpose\"])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_tf,\n var_dict_tf[name_tf].shape))\n \n elif names[1] == \"after_norm\":\n name_tf = map_dict[name][\"name\"]\n data_tf = 
var_dict_tf[name_tf]\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_tf,\n var_dict_tf[name_tf].shape))\n \n elif names[1] == \"embed_concat_ffn\":\n layeridx = int(names[2])\n name_q = name.replace(\".{}.\".format(layeridx), \".layeridx.\")\n \n layeridx_bias = 0\n layeridx += layeridx_bias\n if \"decoders.\" in name:\n decoder_layeridx_sets.add(layeridx)\n if name_q in map_dict.keys():\n name_v = map_dict[name_q][\"name\"]\n name_tf = name_v.replace(\"layeridx\", \"{}\".format(layeridx))\n data_tf = var_dict_tf[name_tf]\n if map_dict[name_q][\"squeeze\"] is not None:\n data_tf = np.squeeze(data_tf, axis=map_dict[name_q][\"squeeze\"])\n if map_dict[name_q][\"transpose\"] is not None:\n data_tf = np.transpose(data_tf, map_dict[name_q][\"transpose\"])\n data_tf = torch.from_numpy(data_tf).type(torch.float32).to(\"cpu\")\n assert var_dict_torch[name].size() == data_tf.size(), \"{}, {}, {} != {}\".format(name, name_tf,\n var_dict_torch[\n name].size(),\n data_tf.size())\n var_dict_torch_update[name] = data_tf\n logging.info(\n \"torch tensor: {}, {}, loading from tf tensor: {}, {}\".format(name, data_tf.size(), name_v,\n var_dict_tf[name_tf].shape))\n \n return var_dict_torch_update" } ]
from typing import List
from typing import Tuple
from funcodec.modules.streaming_utils import utils as myutils
from funcodec.models.decoder.transformer_decoder import BaseTransformerDecoder
from typeguard import check_argument_types
from funcodec.modules.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
from funcodec.modules.embedding import PositionalEncoding
from funcodec.modules.layer_norm import LayerNorm
from funcodec.modules.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
from funcodec.modules.repeat import repeat
from funcodec.models.decoder.sanm_decoder import DecoderLayerSANM, ParaformerSANMDecoder
import logging
import torch
import torch.nn as nn
import numpy as np
13,827
class ContextualDecoderLayer(nn.Module):
    def __init__(
        self,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an DecoderLayer object."""
        super(ContextualDecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
class ContextualDecoderLayer(nn.Module):
    def __init__(
        self,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an DecoderLayer object."""
        super(ContextualDecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
self.norm1 = LayerNorm(size)
5
2023-10-07 02:00:40+00:00
16k
longzw1997/Open-GroundingDino
models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "get_tokenlizer", "path": "groundingdino/util/get_tokenlizer.py", "snippet": "def get_tokenlizer(text_encoder_type):\n if not isinstance(text_encoder_type, str):\n # print(\"text_encoder_type is not a str\")\n if hasattr(text_encoder_type, \"text_encoder_type\"):\n text_encoder_type = text_encoder_type.text_encoder_type\n elif text_encoder_type.get(\"text_encoder_type\", False):\n text_encoder_type = text_encoder_type.get(\"text_encoder_type\")\n elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):\n pass\n else:\n raise ValueError(\n \"Unknown type of text_encoder_type: {}\".format(type(text_encoder_type))\n )\n print(\"final text_encoder_type: {}\".format(text_encoder_type))\n tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)\n print(\"load tokenizer done.\")\n return tokenizer" }, { "identifier": "NestedTensor", "path": "groundingdino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == \"auto\":\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\n \"tensors dim must be 3 or 4 but {}({})\".format(\n self.tensors.dim(), self.tensors.shape\n )\n )\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\"tensors.shape\": self.tensors.shape, \"mask.shape\": self.mask.shape}" }, { "identifier": "accuracy", "path": "groundingdino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = 
pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "groundingdino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "groundingdino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "inverse_sigmoid", "path": "groundingdino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)" }, { "identifier": "is_dist_avail_and_initialized", "path": "groundingdino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "nested_tensor_from_tensor_list", "path": "groundingdino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)" }, { "identifier": "get_phrases_from_posmap", "path": "groundingdino/util/utils.py", "snippet": "def get_phrases_from_posmap(\n posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255\n):\n assert isinstance(posmap, torch.Tensor), \"posmap must be torch.Tensor\"\n if posmap.dim() == 1:\n posmap[0: left_idx + 1] = False\n posmap[right_idx:] = False\n non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n token_ids = [tokenized[\"input_ids\"][i] for i in non_zero_idx]\n return tokenizer.decode(token_ids)\n else:\n raise NotImplementedError(\"posmap must be 1-dim\")" }, { "identifier": 
"COCOVisualizer", "path": "groundingdino/util/visualizer.py", "snippet": "class COCOVisualizer:\n def __init__(self, coco=None, tokenlizer=None) -> None:\n self.coco = coco\n\n def visualize(self, img, tgt, caption=None, dpi=180, savedir=\"vis\"):\n \"\"\"\n img: tensor(3, H, W)\n tgt: make sure they are all on cpu.\n must have items: 'image_id', 'boxes', 'size'\n \"\"\"\n plt.figure(dpi=dpi)\n plt.rcParams[\"font.size\"] = \"5\"\n ax = plt.gca()\n img = renorm(img).permute(1, 2, 0)\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n ax.imshow(img)\n\n self.addtgt(tgt)\n\n if tgt is None:\n image_id = 0\n elif \"image_id\" not in tgt:\n image_id = 0\n else:\n image_id = tgt[\"image_id\"]\n\n if caption is None:\n savename = \"{}/{}-{}.png\".format(\n savedir, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n else:\n savename = \"{}/{}-{}-{}.png\".format(\n savedir, caption, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n print(\"savename: {}\".format(savename))\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(savename)\n plt.close()\n\n def addtgt(self, tgt):\n \"\"\" \"\"\"\n if tgt is None or not \"boxes\" in tgt:\n ax = plt.gca()\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n\n ax.set_axis_off()\n return\n\n ax = plt.gca()\n H, W = tgt[\"size\"]\n numbox = tgt[\"boxes\"].shape[0]\n\n color = []\n polygons = []\n boxes = []\n for box in tgt[\"boxes\"].cpu():\n unnormbbox = box * torch.Tensor([W, H, W, H])\n unnormbbox[:2] -= unnormbbox[2:] / 2\n [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n color.append(c)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if \"strings_positive\" in tgt and len(tgt[\"strings_positive\"]) > 0:\n assert (\n len(tgt[\"strings_positive\"]) == numbox\n ), f\"{len(tgt['strings_positive'])} = {numbox}, \"\n for idx, strlist in enumerate(tgt[\"strings_positive\"]):\n cate_id = int(tgt[\"labels\"][idx])\n _string = str(cate_id) + \":\" + \" \".join(strlist)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"box_label\" in tgt:\n assert len(tgt[\"box_label\"]) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n for idx, bl in enumerate(tgt[\"box_label\"]):\n _string = str(bl)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n # plt.figure()\n # rainbow_text(0.0,0.0,\"all unicorns poop rainbows ! ! 
!\".split(),\n # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])\n\n if \"attn\" in tgt:\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n if isinstance(tgt[\"attn\"], tuple):\n tgt[\"attn\"] = [tgt[\"attn\"]]\n for item in tgt[\"attn\"]:\n attn_map, basergb = item\n attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)\n attn_map = (attn_map * 255).astype(np.uint8)\n cm = ColorMap(basergb)\n heatmap = cm(attn_map)\n ax.imshow(heatmap)\n ax.set_axis_off()\n\n def showAnns(self, anns, draw_bbox=False):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if \"segmentation\" in anns[0] or \"keypoints\" in anns[0]:\n datasetType = \"instances\"\n elif \"caption\" in anns[0]:\n datasetType = \"captions\"\n else:\n raise Exception(\"datasetType not supported\")\n if datasetType == \"instances\":\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if \"segmentation\" in ann:\n if type(ann[\"segmentation\"]) == list:\n # polygon\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((int(len(seg) / 2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann[\"image_id\"]]\n if type(ann[\"segmentation\"][\"counts\"]) == list:\n rle = maskUtils.frPyObjects(\n [ann[\"segmentation\"]], t[\"height\"], t[\"width\"]\n )\n else:\n rle = [ann[\"segmentation\"]]\n m = maskUtils.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n if ann[\"iscrowd\"] == 1:\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n if ann[\"iscrowd\"] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n if \"keypoints\" in ann and type(ann[\"keypoints\"]) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann[\"category_id\"])[0][\"skeleton\"]) - 1\n kp = np.array(ann[\"keypoints\"])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk] > 0):\n plt.plot(x[sk], y[sk], linewidth=3, color=c)\n plt.plot(\n x[v > 0],\n y[v > 0],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=\"k\",\n markeredgewidth=2,\n )\n plt.plot(\n x[v > 1],\n y[v > 1],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=c,\n markeredgewidth=2,\n )\n\n if draw_bbox:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann[\"bbox\"]\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n # ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == \"captions\":\n for ann in anns:\n print(ann[\"caption\"])" }, { "identifier": "create_positive_map_from_span", "path": "groundingdino/util/vl_utils.py", "snippet": "def create_positive_map_from_span(tokenized, token_span, max_text_len=256):\n \"\"\"construct a map such that positive_map[i,j] = True iff box i is associated to token j\n Input:\n - tokenized:\n - input_ids: Tensor[1, ntokens]\n - attention_mask: Tensor[1, ntokens]\n - token_span: list with length num_boxes.\n - 
each item: [start_idx, end_idx]\n \"\"\"\n positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)\n for j, tok_list in enumerate(token_span):\n for (beg, end) in tok_list:\n beg_pos = tokenized.char_to_token(beg)\n end_pos = tokenized.char_to_token(end - 1)\n if beg_pos is None:\n try:\n beg_pos = tokenized.char_to_token(beg + 1)\n if beg_pos is None:\n beg_pos = tokenized.char_to_token(beg + 2)\n except:\n beg_pos = None\n if end_pos is None:\n try:\n end_pos = tokenized.char_to_token(end - 2)\n if end_pos is None:\n end_pos = tokenized.char_to_token(end - 3)\n except:\n end_pos = None\n if beg_pos is None or end_pos is None:\n continue\n\n assert beg_pos is not None and end_pos is not None\n if os.environ.get(\"SHILONG_DEBUG_ONLY_ONE_POS\", None) == \"TRUE\":\n positive_map[j, beg_pos] = 1\n break\n else:\n positive_map[j, beg_pos : end_pos + 1].fill_(1)\n\n return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "build_backbone", "path": "models/GroundingDINO/backbone/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone:\n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords:\n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = True\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]\n args.backbone_freeze_keywords\n use_checkpoint = getattr(args, \"use_checkpoint\", False)\n\n if args.backbone in [\"resnet50\", \"resnet101\"]:\n backbone = Backbone(\n args.backbone,\n train_backbone,\n args.dilation,\n return_interm_indices,\n batch_norm=FrozenBatchNorm2d,\n )\n bb_num_channels = backbone.num_channels\n elif args.backbone in [\n \"swin_T_224_1k\",\n \"swin_B_224_22k\",\n \"swin_B_384_22k\",\n \"swin_L_224_22k\",\n \"swin_L_384_22k\",\n ]:\n pretrain_img_size = int(args.backbone.split(\"_\")[-2])\n backbone = build_swin_transformer(\n args.backbone,\n pretrain_img_size=pretrain_img_size,\n out_indices=tuple(return_interm_indices),\n dilation=False,\n use_checkpoint=use_checkpoint,\n )\n\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n\n assert len(bb_num_channels) == len(\n return_interm_indices\n ), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels\n assert isinstance(\n bb_num_channels, List\n ), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n # import ipdb; ipdb.set_trace()\n return model" }, { "identifier": "BertModelWarper", "path": "models/GroundingDINO/bertwarper.py", "snippet": "class BertModelWarper(nn.Module):\n def __init__(self, bert_model):\n super().__init__()\n # self.bert = bert_modelc\n\n self.config = bert_model.config\n self.embeddings = bert_model.embeddings\n self.encoder = bert_model.encoder\n self.pooler = bert_model.pooler\n\n self.get_extended_attention_mask = bert_model.get_extended_attention_mask\n self.invert_attention_mask = bert_model.invert_attention_mask\n 
self.get_head_mask = bert_model.get_head_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(\n ((batch_size, seq_length + past_key_values_length)), device=device\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, 
device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )" }, { "identifier": "generate_masks_with_special_tokens", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n\n previous_col = col\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long)" }, { "identifier": "generate_masks_with_special_tokens_and_transfer_map", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n cate_to_token_mask_list = [[] for _ in range(bs)]\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()\n c2t_maski[previous_col + 1 : col] = True\n cate_to_token_mask_list[row].append(c2t_maski)\n previous_col = col\n\n cate_to_token_mask_list = [\n torch.stack(cate_to_token_mask_listi, dim=0)\n for cate_to_token_mask_listi in cate_to_token_mask_list\n ]\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list" }, { "identifier": "build_transformer", "path": "models/GroundingDINO/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n learnable_tgt_init=True,\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n embed_init_tgt=args.embed_init_tgt,\n use_text_enhancer=args.use_text_enhancer,\n use_fusion_layer=args.use_fusion_layer,\n use_checkpoint=args.use_checkpoint,\n use_transformer_ckpt=args.use_transformer_ckpt,\n use_text_cross_attention=args.use_text_cross_attention,\n text_dropout=args.text_dropout,\n fusion_dropout=args.fusion_dropout,\n fusion_droppath=args.fusion_droppath,\n )" }, { "identifier": "MLP", "path": "models/GroundingDINO/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n )\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "ContrastiveEmbed", "path": "models/GroundingDINO/utils.py", "snippet": "class ContrastiveEmbed(nn.Module):\n def __init__(self, max_text_len=256):\n \"\"\"\n Args:\n max_text_len: max length of text.\n \"\"\"\n super().__init__()\n 
self.max_text_len = max_text_len\n\n def forward(self, x, text_dict):\n \"\"\"_summary_\n\n Args:\n x (_type_): _description_\n text_dict (_type_): _description_\n {\n 'encoded_text': encoded_text, # bs, 195, d_model\n 'text_token_mask': text_token_mask, # bs, 195\n # True for used tokens. False for padding tokens\n }\n Returns:\n _type_: _description_\n \"\"\"\n assert isinstance(text_dict, dict)\n # print(x) #torch.Size([2, 16320, 256])\n # print(text_dict)\n\n # import pdb;pdb.set_trace()\n y = text_dict[\"encoded_text\"] #torch.Size([2, 195, 256])\n text_token_mask = text_dict[\"text_token_mask\"]\n\n res = x @ y.transpose(-1, -2)\n res.masked_fill_(~text_token_mask[:, None, :], float(\"-inf\"))\n # 接着,对res进行掩码操作,将未使用的文本token(即padding的token)对应的得分置为负无穷float(\"-inf\")。这是为了在计算相似度时,排除padding部分的影响。\n\n\n # padding to max_text_len\n new_res = torch.full((*res.shape[:-1], self.max_text_len), float(\"-inf\"), device=res.device)\n new_res[..., : res.shape[-1]] = res #torch.Size([2, 16320, 195])\n\n return new_res" }, { "identifier": "sigmoid_focal_loss", "path": "models/GroundingDINO/utils.py", "snippet": "def sigmoid_focal_loss(\n inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False\n):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if no_reduction:\n return loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "build_matcher", "path": "models/GroundingDINO/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" } ]
import copy import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast from groundingdino.util import box_ops, get_tokenlizer from groundingdino.util.misc import ( NestedTensor, accuracy, get_world_size, interpolate, inverse_sigmoid, is_dist_avail_and_initialized, nested_tensor_from_tensor_list, ) from groundingdino.util.utils import get_phrases_from_posmap from groundingdino.util.visualizer import COCOVisualizer from groundingdino.util.vl_utils import create_positive_map_from_span from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone from .bertwarper import ( BertModelWarper, generate_masks_with_special_tokens, generate_masks_with_special_tokens_and_transfer_map, ) from .transformer import build_transformer from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss from .matcher import build_matcher from pycocotools.coco import COCO
12492
two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model 
"text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model "text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask
mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
5
2023-10-14 02:20:31+00:00
16k
Beckschen/3D-TransUNet
nn_transunet/trainer/network_trainer.py
[ { "identifier": "SegmentationNetwork", "path": "nn_transunet/networks/neural_network.py", "snippet": "class SegmentationNetwork(NeuralNetwork):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n\n # if we have 5 pooling then our patch size must be divisible by 2**5\n # for example in a 2d network that does 5 pool in x and 6 pool\n self.input_shape_must_be_divisible_by = None\n # in y this would be (32, 64)\n\n # we need to know this because we need to know if we are a 2d or a 3d netowrk\n self.conv_op = None # nn.Conv2d or nn.Conv3d\n\n # this tells us how many channely we have in the output. Important for preallocation in inference\n self.num_classes = None # number of channels in the output\n\n # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions\n # during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what\n # to apply in inference. For the most part this will be softmax\n self.inference_apply_nonlin = lambda x: x # softmax_helper\n\n # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the\n # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians\n # can be expensive, so it makes sense to save and reuse them.\n self._gaussian_3d = self._patch_size_for_gaussian_3d = None\n self._gaussian_2d = self._patch_size_for_gaussian_2d = None\n\n def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),\n use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will\n detect that automatically and run the appropriate code.\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. 
Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :param mixed_precision: if True, will run inference in mixed precision with autocast()\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if self.conv_op == nn.Conv2d:\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n if self.conv_op == nn.Conv3d:\n if max(mirror_axes) > 2:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 4, \"data must have shape (c,x,y,z)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv3d:\n if use_sliding_window:\n res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,\n verbose=verbose)\n else:\n res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)\n elif self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, False)\n else:\n res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, all_in_gpu, False)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D\n image with that (you dummy).\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. 
We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if self.conv_op == nn.Conv3d:\n raise RuntimeError(\n \"Cannot predict 2d if the network is 3d. Dummy.\")\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 3, \"data must have shape (c,x,y)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, verbose)\n else:\n res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, verbose)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n @staticmethod\n def _get_gaussian(patch_size, sigma_scale=1. 
/ 8) -> np.ndarray:\n tmp = np.zeros(patch_size)\n center_coords = [i // 2 for i in patch_size]\n sigmas = [i * sigma_scale for i in patch_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(\n tmp, sigmas, 0, mode='constant', cval=0)\n gaussian_importance_map = gaussian_importance_map / \\\n np.max(gaussian_importance_map) * 1\n gaussian_importance_map = gaussian_importance_map.astype(np.float32)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = np.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map\n\n @staticmethod\n def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:\n assert [i >= j for i, j in zip(\n image_size, patch_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * step_size for i in patch_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j,\n k in zip(image_size, target_step_sizes_in_voxels, patch_size)]\n\n steps = []\n for dim in range(len(patch_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - patch_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n # does not matter because there is only one step at 0\n actual_step_size = 99999999999\n\n steps_here = [int(np.round(actual_step_size * i))\n for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps\n\n def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y, z\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. 
It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_3d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. / 8)\n\n self._gaussian_3d = gaussian_importance_map\n self._patch_size_for_gaussian_3d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_3d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_3d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n for z in steps[2]:\n lb_z = z\n ub_z = z + patch_size[2]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_3D(\n data[None, :, lb_x:ub_x, lb_y:ub_y,\n lb_z:ub_z], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / 
aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. No sliding window\n \"\"\"\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_2D_2Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. 
No sliding window\n \"\"\"\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_3D_3Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 8\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x)) # self(x) - forward\n result_torch += 1 / num_results * pred\n\n if m == 1 and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))\n result_torch += 1 / num_results * torch.flip(pred, (4,))\n\n if m == 2 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3,))\n\n if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 3))\n\n if m == 4 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2,))\n\n if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 2))\n\n if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(\n self(torch.flip(x, (4, 3, 2))))\n 
result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n assert len(x.shape) == 4, 'x must be (b, c, x, y)'\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 4\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x))\n result_torch += 1 / num_results * pred\n\n if m == 1 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3, ))\n\n if m == 2 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2, ))\n\n if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_2d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. 
/ 8)\n\n self._gaussian_2d = gaussian_importance_map\n self._patch_size_for_gaussian_2d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_2d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_2d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_2D(\n data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if 
all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n predicted_segmentation = []\n softmax_pred = []\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(\n x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n return predicted_segmentation, softmax_pred\n\n def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pseudo3D_slices: int = 5, all_in_gpu: bool = False,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n assert pseudo3D_slices % 2 == 1, \"pseudo3D_slices must be odd\"\n extra_slices = (pseudo3D_slices - 1) // 2\n\n shp_for_pad = np.array(x.shape)\n shp_for_pad[1] = extra_slices\n\n pad = np.zeros(shp_for_pad, dtype=np.float32)\n data = np.concatenate((pad, x, pad), 1)\n\n predicted_segmentation = []\n softmax_pred = []\n for s in range(extra_slices, data.shape[1] - extra_slices):\n d = data[:, (s - extra_slices):(s + extra_slices + 1)]\n d = d.reshape((-1, d.shape[-2], d.shape[-1]))\n pred_seg, softmax_pres = \\\n self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,\n regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred\n\n def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), step_size: float = 0.5,\n regions_class_order: tuple = None, use_gaussian: bool = False,\n pad_border_mode: str = \"edge\", pad_kwargs: dict = None,\n all_in_gpu: bool = False,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n\n predicted_segmentation = []\n softmax_pred = []\n\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(\n x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,\n pad_border_mode, pad_kwargs, all_in_gpu, verbose)\n\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n\n predicted_segmentation = 
np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred" }, { "identifier": "ModelLossSemsegGatedCRF", "path": "nn_transunet/trainer/loss_functions.py", "snippet": "class ModelLossSemsegGatedCRF(torch.nn.Module):\n \"\"\"\n This module provides an implementation of the Gated CRF Loss for Weakly Supervised Semantic Image Segmentation.\n This loss function promotes consistent label assignment guided by input features, such as RGBXY.\n Please consider using the following bibtex for citation:\n @article{obukhov2019gated,\n author={Anton Obukhov and Stamatios Georgoulis and Dengxin Dai and Luc {Van Gool}},\n title={Gated {CRF} Loss for Weakly Supervised Semantic Image Segmentation},\n journal={CoRR},\n volume={abs/1906.04651},\n year={2019},\n url={http://arxiv.org/abs/1906.04651},\n }\n \"\"\"\n\n def forward(\n self, y_hat_softmax, kernels_desc, kernels_radius, sample, height_input, width_input,\n mask_src=None, mask_dst=None, compatibility=None, custom_modality_downsamplers=None, out_kernels_vis=False\n ):\n \"\"\"\n Performs the forward pass of the loss.\n :param y_hat_softmax: A tensor of predicted per-pixel class probabilities of size NxCxHxW\n :param kernels_desc: A list of dictionaries, each describing one Gaussian kernel composition from modalities.\n The final kernel is a weighted sum of individual kernels. Following example is a composition of\n RGBXY and XY kernels:\n kernels_desc: [{\n 'weight': 0.9, # Weight of RGBXY kernel\n 'xy': 6, # Sigma for XY\n 'rgb': 0.1, # Sigma for RGB\n },{\n 'weight': 0.1, # Weight of XY kernel\n 'xy': 6, # Sigma for XY\n }]\n :param kernels_radius: Defines size of bounding box region around each pixel in which the kernel is constructed.\n :param sample: A dictionary with modalities (except 'xy') used in kernels_desc parameter. Each of the provided\n modalities is allowed to be larger than the shape of y_hat_softmax, in such case downsampling will be\n invoked. 
Default downsampling method is area resize; this can be overriden by setting.\n custom_modality_downsamplers parameter.\n :param width_input, height_input: Dimensions of the full scale resolution of modalities\n :param mask_src: (optional) Source mask.\n :param mask_dst: (optional) Destination mask.\n :param compatibility: (optional) Classes compatibility matrix, defaults to Potts model.\n :param custom_modality_downsamplers: A dictionary of modality downsampling functions.\n :param out_kernels_vis: Whether to return a tensor with kernels visualized with some step.\n :return: Loss function value.\n \"\"\"\n assert y_hat_softmax.dim() == 4, 'Prediction must be a NCHW batch'\n N, C, height_pred, width_pred = y_hat_softmax.shape\n device = y_hat_softmax.device\n\n assert width_input % width_pred == 0 and height_input % height_pred == 0 and \\\n width_input * height_pred == height_input * width_pred, \\\n f'[{width_input}x{height_input}] !~= [{width_pred}x{height_pred}]'\n\n kernels = self._create_kernels(\n kernels_desc, kernels_radius, sample, N, height_pred, width_pred, device, custom_modality_downsamplers\n )\n\n denom = N * height_pred * width_pred\n\n def resize_fix_mask(mask, name):\n assert mask.dim() == 4 and mask.shape[:2] == (N, 1) and mask.dtype == torch.float32, \\\n f'{name} mask must be a NCHW batch with C=1 and dtype float32'\n if mask.shape[2:] != (height_pred, width_pred):\n mask = ModelLossSemsegGatedCRF._downsample(\n mask, 'mask', height_pred, width_pred, custom_modality_downsamplers\n )\n mask[mask != mask] = 0.0 # handle NaN\n # handle edges of mask after interpolation\n mask[mask < 1.0] = 0.0\n return mask\n\n if mask_src is not None:\n mask_src = resize_fix_mask(mask_src, 'Source')\n denom = mask_src.sum().clamp(min=1)\n mask_src = self._unfold(mask_src, kernels_radius)\n kernels = kernels * mask_src\n\n if mask_dst is not None:\n mask_dst = resize_fix_mask(mask_dst, 'Destination')\n denom = mask_dst.sum().clamp(min=1)\n mask_dst = mask_dst.view(N, 1, 1, 1, height_pred, width_pred)\n kernels = kernels * mask_dst\n\n y_hat_unfolded = self._unfold(y_hat_softmax, kernels_radius)\n\n product_kernel_x_y_hat = (kernels * y_hat_unfolded) \\\n .view(N, C, (kernels_radius * 2 + 1) ** 2, height_pred, width_pred) \\\n .sum(dim=2, keepdim=False)\n\n if compatibility is None:\n # Using shortcut for Pott's class compatibility model\n loss = -(product_kernel_x_y_hat * y_hat_softmax).sum()\n # comment out to save computation, total loss may go below 0\n loss = kernels.sum() + loss\n else:\n assert compatibility.shape == (\n C, C), f'Compatibility matrix expected shape [{C}x{C}]'\n assert (compatibility < 0).int().sum(\n ) == 0, 'Compatibility matrix must not have negative values'\n assert compatibility.diag.sum() == 0, 'Compatibility matrix diagonal must be 0'\n compat = (C - 1) * \\\n F.normalize(compatibility.float().to(device), p=1, dim=1)\n y_hat_CxNHW = y_hat_softmax.permute(\n 1, 0, 2, 3).contiguous().view(C, -1)\n product_kernel_x_y_hat_NHWxC = product_kernel_x_y_hat.permute(\n 0, 2, 3, 1).contiguous().view(-1, C)\n product_CxC = torch.mm(y_hat_CxNHW, product_kernel_x_y_hat_NHWxC)\n loss = (compat * product_CxC).sum()\n del product_CxC\n\n out = {\n 'loss': loss / denom,\n }\n\n if out_kernels_vis:\n out['kernels_vis'] = self._visualize_kernels(\n kernels, kernels_radius, height_input, width_input, height_pred, width_pred\n )\n\n return out\n\n @staticmethod\n def _downsample(img, modality, height_dst, width_dst, custom_modality_downsamplers):\n if custom_modality_downsamplers 
is not None and modality in custom_modality_downsamplers:\n f_down = custom_modality_downsamplers[modality]\n else:\n f_down = F.adaptive_avg_pool2d\n return f_down(img, (height_dst, width_dst))\n\n @staticmethod\n def _create_kernels(\n kernels_desc, kernels_radius, sample, N, height_pred, width_pred, device, custom_modality_downsamplers\n ):\n kernels = None\n for i, desc in enumerate(kernels_desc):\n weight = desc['weight']\n features = []\n for modality, sigma in desc.items():\n if modality == 'weight':\n continue\n if modality == 'xy':\n feature = ModelLossSemsegGatedCRF._get_mesh(\n N, height_pred, width_pred, device)\n else:\n # assert modality in sample, 'Modality {} is listed in {}-th kernel descriptor, but not present in the sample'.format(modality, i)\n feature = sample\n feature = ModelLossSemsegGatedCRF._downsample(\n feature, modality, height_pred, width_pred, custom_modality_downsamplers\n )\n feature /= sigma\n features.append(feature)\n features = torch.cat(features, dim=1)\n kernel = weight * \\\n ModelLossSemsegGatedCRF._create_kernels_from_features(\n features, kernels_radius)\n kernels = kernel if kernels is None else kernel + kernels\n return kernels\n\n @staticmethod\n def _create_kernels_from_features(features, radius):\n assert features.dim() == 4, 'Features must be a NCHW batch'\n N, C, H, W = features.shape\n kernels = ModelLossSemsegGatedCRF._unfold(features, radius)\n kernels = kernels - kernels[:, :, radius,\n radius, :, :].view(N, C, 1, 1, H, W)\n kernels = (-0.5 * kernels ** 2).sum(dim=1, keepdim=True).exp()\n kernels[:, :, radius, radius, :, :] = 0\n return kernels\n\n @staticmethod\n def _get_mesh(N, H, W, device):\n return torch.cat((\n torch.arange(0, W, 1, dtype=torch.float32, device=device).view(\n 1, 1, 1, W).repeat(N, 1, H, 1),\n torch.arange(0, H, 1, dtype=torch.float32, device=device).view(\n 1, 1, H, 1).repeat(N, 1, 1, W)\n ), 1)\n\n @staticmethod\n def _unfold(img, radius):\n assert img.dim() == 4, 'Unfolding requires NCHW batch'\n N, C, H, W = img.shape\n diameter = 2 * radius + 1\n return F.unfold(img, diameter, 1, radius).view(N, C, diameter, diameter, H, W)\n\n @staticmethod\n def _visualize_kernels(kernels, radius, height_input, width_input, height_pred, width_pred):\n diameter = 2 * radius + 1\n vis = kernels[:, :, :, :, radius::diameter, radius::diameter]\n vis_nh, vis_nw = vis.shape[-2:]\n vis = vis.permute(0, 1, 4, 2, 5, 3).contiguous().view(\n kernels.shape[0], 1, diameter * vis_nh, diameter * vis_nw)\n if vis.shape[2] > height_pred:\n vis = vis[:, :, :height_pred, :]\n if vis.shape[3] > width_pred:\n vis = vis[:, :, :, :width_pred]\n if vis.shape[2:] != (height_pred, width_pred):\n vis = F.pad(vis, [0, width_pred - vis.shape[3],\n 0, height_pred - vis.shape[2]])\n vis = F.interpolate(vis, (height_input, width_input), mode='nearest')\n return vis" } ]
from _warnings import warn
from typing import Tuple
from batchgenerators.utilities.file_and_folder_operations import *
from nn_transunet.networks.neural_network import SegmentationNetwork
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import _LRScheduler
from ..trainer.loss_functions import ModelLossSemsegGatedCRF
from time import time, sleep
from collections import OrderedDict
from abc import abstractmethod
from datetime import datetime
from tqdm import trange
from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sys
import torch.backends.cudnn as cudnn
import torch
import math
import matplotlib.pyplot as plt
13,783
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


matplotlib.use("agg")


def maybe_to_torch(d):
    if isinstance(d, list):
        d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d]
    elif not isinstance(d, torch.Tensor):
        d = torch.from_numpy(d).float()
    return d


def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs) ** exponent


def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9):
    if epoch < warmup_epochs:
        return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs)))
    epoch_rel = epoch - warmup_epochs
    max_epochs_rel = max_epochs - warmup_epochs
    return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent


def to_cuda(data, non_blocking=True, gpu_id=0):
    if isinstance(data, list):
        data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]
    else:
        data = data.cuda(gpu_id, non_blocking=non_blocking)
    return data


class NetworkTrainer(object):
    def __init__(self, deterministic=True, fp16=False):
        """
        A generic class that can train almost any neural network (RNNs excluded). It provides basic
        functionality such as the training loop, tracking of training and validation losses (and the
        target metric if you implement it) Training can be terminated early if the validation loss
        (or the target metric if implemented) do not improve anymore. This is based on a moving
        average (MA) of the loss/metric instead of the raw values to get more smooth results.

        What you need to override:
        - __init__
        - initialize
        - run_online_evaluation (optional)
        - finish_online_evaluation (optional)
        - validate
        - predict_test_case
        """
        self.fp16 = fp16
        self.amp_grad_scaler = None

        if deterministic:
            np.random.seed(12345)
            torch.manual_seed(12345)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(12345)
            cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        else:
            cudnn.deterministic = False
            torch.backends.cudnn.benchmark = True

        ################# SET THESE IN self.initialize() ###################################
        self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
        self.optimizer = None
        self.lr_scheduler = None
        self.tr_gen = self.val_gen = None
        self.was_initialized = False
        self.initial_lr = 1e-2

        ################# SET THESE IN INIT ################################################
        self.output_folder = None
        self.fold = None
        # self.loss = PartiallyCrossEntropyLoss()
        self.loss = None
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


matplotlib.use("agg")


def maybe_to_torch(d):
    if isinstance(d, list):
        d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d]
    elif not isinstance(d, torch.Tensor):
        d = torch.from_numpy(d).float()
    return d


def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs) ** exponent


def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9):
    if epoch < warmup_epochs:
        return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs)))
    epoch_rel = epoch - warmup_epochs
    max_epochs_rel = max_epochs - warmup_epochs
    return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent


def to_cuda(data, non_blocking=True, gpu_id=0):
    if isinstance(data, list):
        data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]
    else:
        data = data.cuda(gpu_id, non_blocking=non_blocking)
    return data


class NetworkTrainer(object):
    def __init__(self, deterministic=True, fp16=False):
        """
        A generic class that can train almost any neural network (RNNs excluded). It provides basic
        functionality such as the training loop, tracking of training and validation losses (and the
        target metric if you implement it) Training can be terminated early if the validation loss
        (or the target metric if implemented) do not improve anymore. This is based on a moving
        average (MA) of the loss/metric instead of the raw values to get more smooth results.

        What you need to override:
        - __init__
        - initialize
        - run_online_evaluation (optional)
        - finish_online_evaluation (optional)
        - validate
        - predict_test_case
        """
        self.fp16 = fp16
        self.amp_grad_scaler = None

        if deterministic:
            np.random.seed(12345)
            torch.manual_seed(12345)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(12345)
            cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        else:
            cudnn.deterministic = False
            torch.backends.cudnn.benchmark = True

        ################# SET THESE IN self.initialize() ###################################
        self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
        self.optimizer = None
        self.lr_scheduler = None
        self.tr_gen = self.val_gen = None
        self.was_initialized = False
        self.initial_lr = 1e-2

        ################# SET THESE IN INIT ################################################
        self.output_folder = None
        self.fold = None
        # self.loss = PartiallyCrossEntropyLoss()
        self.loss = None
self.gatecrfloss = ModelLossSemsegGatedCRF()
1
2023-10-11 05:19:25+00:00
16k
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = out[mask]\n tgt = tgt[mask]\n\n if(len(tgt) == 0):\n return 1.0\n\n num_right = (out == tgt)\n num_right = torch.sum(num_right).type(TORCH_FLOAT)\n\n acc = num_right / len(tgt)\n\n return acc" }, { "identifier": "create_vevo_datasets", "path": "dataset/vevo_dataset.py", "snippet": "def create_vevo_datasets(dataset_root = \"./dataset\", max_seq_chord=300, max_seq_video=300, vis_models=\"2d/clip_l14p\", emo_model=\"6c_l14p\", split_ver=\"v1\", random_seq=True, is_video=True):\n\n train_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"train\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n val_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"val\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n test_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"test\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n return train_dataset, val_dataset, test_dataset" }, { "identifier": "MusicTransformer", "path": "model/music_transformer.py", "snippet": "class MusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi=2048, max_sequence_chord=300, rpr=False):\n super(MusicTransformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n\n # self.embedding_key = nn.Embedding(1, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n\n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n # RPR Transformer\n else:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, 
CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, x_root, x_attr, feature_key, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n\n ### Chord + Key (DECODER) ###\n # x = self.embedding(x)\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n xf = self.Linear_chord(x)\n\n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n xf = self.positional_encoding(xf)\n \n ### TRANSFORMER ###\n x_out = self.transformer(src=xf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n \n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n\n # generate\n def generate(self, feature_key=None, primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0):\n assert (not self.training), \"Cannot generate while in training mode\"\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n print(\"Generating sequence of max length:\", target_seq_length)\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n \n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n # gen_seq_batch = gen_seq.clone()\n # y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :CHORD_END]\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], feature_key) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n #print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n 
gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n \n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "VideoMusicTransformer", "path": "model/video_music_transformer.py", "snippet": "class VideoMusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi =2048, max_sequence_video=300, max_sequence_chord=300, total_vf_dim = 0, rpr=False):\n super(VideoMusicTransformer, self).__init__()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_video = max_sequence_video\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n \n self.total_vf_dim = total_vf_dim\n self.Linear_vis = nn.Linear(self.total_vf_dim, self.d_model)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n \n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.positional_encoding_video = PositionalEncoding(self.d_model, self.dropout, self.max_seq_video)\n\n # Add condition (minor or major)\n self.condition_linear = nn.Linear(1, self.d_model)\n \n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff\n )\n # RPR Transformer\n else:\n decoder_norm = LayerNorm(self.d_model)\n decoder_layer = TransformerDecoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n decoder = TransformerDecoderRPR(decoder_layer, self.nlayers, decoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=decoder\n ) \n \n self.Wout = nn.Linear(self.d_model, CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, x, x_root, x_attr, feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = 
feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n\n xf = self.Linear_chord(x)\n\n ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###\n vf_concat = feature_semantic_list[0].float()\n\n for i in range(1, len(feature_semantic_list)):\n vf_concat = torch.cat( (vf_concat, feature_semantic_list[i].float()), dim=2) \n \n vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf = self.Linear_vis(vf_concat)\n \n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n vf = vf.permute(1,0,2) # -> (max_seq_video, batch_size, d_model)\n xf = self.positional_encoding(xf)\n vf = self.positional_encoding_video(vf)\n\n ### TRANSFORMER ###\n x_out = self.transformer(src=vf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n\n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n \n def generate(self, feature_semantic_list = [], feature_key=None, feature_scene_offset=None, feature_motion=None, feature_emotion=None,\n primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0, max_conseq_N = 0, max_conseq_chord = 2):\n \n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], \n feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n # token_probs.shape : [1, 157] \n # 0: N, 1: C, ... 
, 156: B:maj7\n # 157 chordEnd 158 padding\n if max_conseq_N == 0:\n token_probs[0][0] = 0.0\n isMaxChord = True\n if cur_i >= max_conseq_chord :\n preChord = gen_seq[0][cur_i-1].item() \n for k in range (1, max_conseq_chord):\n if preChord != gen_seq[0][cur_i-1-k].item():\n isMaxChord = False\n else:\n isMaxChord = False\n \n if isMaxChord:\n preChord = gen_seq[0][cur_i-1].item()\n token_probs[0][preChord] = 0.0\n \n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Grabs the default device. 
Default device is CUDA if available and use_cuda is not False, CPU otherwise.\n ----------\n \"\"\"\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Sets whether to use CUDA (if available), or use the CPU (not recommended)\n ----------\n \"\"\"\n\n global USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Class for custom learn rate scheduler (to be used by torch.optim.lr_scheduler.LambdaLR).\n\n Learn rate for each step (batch) given the warmup steps is:\n lr = [ 1/sqrt(d_model) ] * min[ 1/sqrt(step) , step * (warmup_steps)^-1.5 ]\n\n This is from Attention is All you Need (https://arxiv.org/abs/1706.03762)\n ----------\n \"\"\"\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Method to pass to LambdaLR. Increments the step and computes the new learn rate.\n ----------\n \"\"\"\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Hack to get the current learn rate of the model\n ----------\n \"\"\"\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-dataset_dir\", type=str, default=\"./dataset/\", help=\"Folder of VEVO dataset\")\n \n parser.add_argument(\"-input_dir_music\", type=str, default=\"./dataset/vevo_chord/\" + MUSIC_TYPE, help=\"Folder of video CNN feature files\")\n parser.add_argument(\"-input_dir_video\", type=str, default=\"./dataset/vevo_vis\", help=\"Folder of video CNN feature files\")\n\n parser.add_argument(\"-output_dir\", type=str, default=\"./saved_models\", help=\"Folder to save model weights. 
Saves one every epoch\")\n \n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-n_workers\", type=int, default=1, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=1, help=\"Batch size to use\")\n parser.add_argument(\"-epochs\", type=int, default=5, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-max_sequence_midi\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-max_sequence_video\", type=int, default=300, help=\"Maximum video sequence to consider\")\n parser.add_argument(\"-max_sequence_chord\", type=int, default=300, help=\"Maximum video sequence to consider\")\n\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"-is_video\", type=bool, default=IS_VIDEO, help=\"MusicTransformer or VideoMusicTransformer\")\n\n if IS_VIDEO:\n parser.add_argument(\"-vis_models\", type=str, default=VIS_MODELS_SORTED, help=\"...\")\n else:\n parser.add_argument(\"-vis_models\", type=str, default=\"\", help=\"...\")\n\n parser.add_argument(\"-emo_model\", type=str, default=\"6c_l14p\", help=\"...\")\n parser.add_argument(\"-rpr\", type=bool, default=RPR, help=\"...\")\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n print(SEPERATOR)\n \n print(\"dataset_dir:\", args.dataset_dir )\n \n print(\"input_dir_music:\", args.input_dir_music)\n print(\"input_dir_video:\", args.input_dir_video)\n\n print(\"output_dir:\", args.output_dir)\n\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n 
print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n\n print(\"max_sequence_midi:\", args.max_sequence_midi)\n print(\"max_sequence_video:\", args.max_sequence_video)\n print(\"max_sequence_chord:\", args.max_sequence_chord)\n \n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(\"is_video:\", args.is_video)\n\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n\n o_stream.write(\"max_sequence_midi: \" + str(args.max_sequence_midi) + \"\\n\")\n o_stream.write(\"max_sequence_video: \" + str(args.max_sequence_video) + \"\\n\")\n o_stream.write(\"max_sequence_chord: \" + str(args.max_sequence_chord) + \"\\n\")\n \n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + str(args.dropout) + \"\\n\")\n\n o_stream.write(\"is_video: \" + str(args.is_video) + \"\\n\")\n o_stream.write(\"vis_models: \" + str(args.vis_models) + \"\\n\")\n o_stream.write(\"input_dir_music: \" + str(args.input_dir_music) + \"\\n\")\n o_stream.write(\"input_dir_video: \" + str(args.input_dir_video) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model_vevo.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, \n train_loss_func, train_loss_emotion_func,\n opt, lr_scheduler=None, print_modulus=1, isVideo=True):\n \n loss_chord = -1\n loss_emotion = -1\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n opt.zero_grad()\n\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n\n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n # use VideoMusicTransformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = 
train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = train_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n \n else:\n #videomusic tran nosep\n y = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = train_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n else:\n # music transformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n\n loss_chord = loss_chord_root + loss_chord_attr\n loss_emotion = -1\n \n total_loss = loss_chord\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n else:\n # use MusicTransformer (no sep)\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = -1\n\n total_loss = loss_chord\n total_loss.backward()\n\n opt.step()\n\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n \n if((batch_num+1) % print_modulus == 0):\n print(SEPERATOR)\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num+1, \"/\", len(dataloader))\n print(\"LR:\", get_lr(opt))\n print(\"Train loss (total):\", float(total_loss))\n print(\"Train loss (chord):\", float(loss_chord))\n print(\"Train loss (emotion):\", float(loss_emotion))\n print(\"\")\n print(\"Time (s):\", time_took)\n print(SEPERATOR)\n print(\"\")\n return" }, { "identifier": "eval_model", "path": "utilities/run_model_vevo.py", "snippet": "def eval_model(model, dataloader, \n eval_loss_func, eval_loss_emotion_func,\n isVideo = True, isGenConfusionMatrix=False):\n model.eval()\n avg_acc = -1\n avg_cor = -1\n avg_acc_cor = -1\n\n avg_h1 = -1\n avg_h3 = -1\n avg_h5 = -1\n \n avg_loss_chord = -1\n avg_loss_emotion = -1\n avg_total_loss = -1\n\n true_labels = []\n true_root_labels = []\n true_attr_labels = []\n \n pred_labels = []\n pred_root_labels = []\n pred_attr_labels = []\n \n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n n_test_cor = 0 \n\n sum_loss_chord = 0.0\n sum_loss_emotion = 0.0\n sum_total_loss = 0.0\n\n sum_acc = 0.0\n sum_cor = 0.0\n\n sum_h1 = 0.0\n sum_h3 = 0.0\n sum_h5 = 0.0\n \n for batch in dataloader:\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = 
batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n \n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n y= model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n\n tgt = tgt.flatten()\n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n \n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n if isGenConfusionMatrix:\n pred = y.argmax(dim=1).detach().cpu().numpy()\n pred_root = []\n pred_attr = []\n\n for i in pred:\n if i == 0:\n pred_root.append(0)\n pred_attr.append(0)\n elif i == 157:\n pred_root.append(CHORD_ROOT_END)\n pred_attr.append(CHORD_ATTR_END)\n elif i == 158:\n pred_root.append(CHORD_ROOT_PAD)\n pred_attr.append(CHORD_ATTR_PAD)\n else:\n rootindex = int( (i-1)/13 ) + 1\n attrindex = (i-1)%13 + 1\n pred_root.append(rootindex)\n pred_attr.append(attrindex)\n \n pred_root = 
np.array(pred_root)\n pred_attr = np.array(pred_attr)\n\n true = tgt.detach().cpu().numpy()\n true_root = tgt_root.detach().cpu().numpy()\n true_attr = tgt_attr.detach().cpu().numpy()\n \n pred_labels.extend(pred)\n pred_root_labels.extend(pred_root)\n pred_attr_labels.extend(pred_attr)\n \n true_labels.extend(true)\n true_root_labels.extend(true_root)\n true_attr_labels.extend(true_attr)\n else:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n \n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n # use MusicTransformer no sep\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n \n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n\n tgt_emotion = tgt_emotion.squeeze()\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = loss_chord\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n avg_loss_chord = sum_loss_chord / n_test\n avg_loss_emotion = sum_loss_emotion / n_test\n avg_total_loss = sum_total_loss / n_test\n\n avg_acc = sum_acc / n_test\n avg_cor = sum_cor / n_test_cor\n \n avg_h1 = sum_h1 / n_test\n avg_h3 = sum_h3 / n_test\n avg_h5 = sum_h5 / n_test\n \n avg_acc_cor = (avg_acc + avg_cor)/ 2.0\n\n if isGenConfusionMatrix:\n chordInvDicPath = \"./dataset/vevo_meta/chord_inv.json\"\n chordRootInvDicPath = \"./dataset/vevo_meta/chord_root_inv.json\"\n chordAttrInvDicPath = \"./dataset/vevo_meta/chord_attr_inv.json\"\n \n with open(chordInvDicPath) as json_file:\n chordInvDic = json.load(json_file)\n with open(chordRootInvDicPath) as json_file:\n chordRootInvDic = json.load(json_file)\n with open(chordAttrInvDicPath) as json_file:\n chordAttrInvDic = json.load(json_file)\n\n # Confusion matrix (CHORD)\n topChordList = []\n with open(\"./dataset/vevo_meta/top_chord.txt\", encoding = 'utf-8') as f:\n for line in f:\n line = line.strip()\n line_arr = line.split(\" \")\n if 
len(line_arr) == 3 :\n chordID = line_arr[1]\n topChordList.append( int(chordID) )\n topChordList = np.array(topChordList)\n topChordList = topChordList[:10]\n mask = np.isin(true_labels, topChordList)\n true_labels = np.array(true_labels)[mask]\n pred_labels = np.array(pred_labels)[mask]\n\n conf_matrix = confusion_matrix(true_labels, pred_labels, labels=topChordList)\n label_names = [ chordInvDic[str(label_id)] for label_id in topChordList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(topChordList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix.png\")\n plt.show()\n\n # Confusion matrix (CHORD ROOT) \n chordRootList = np.arange(1, 13)\n conf_matrix = confusion_matrix(true_root_labels, pred_root_labels, labels= chordRootList )\n \n label_names = [ chordRootInvDic[str(label_id)] for label_id in chordRootList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord root)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordRootList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_root.png\")\n plt.show()\n\n # Confusion matrix (CHORD ATTR)\n chordAttrList = np.arange(1, 14)\n conf_matrix = confusion_matrix(true_attr_labels, pred_attr_labels, labels= chordAttrList )\n \n label_names = [ chordAttrInvDic[str(label_id)] for label_id in chordAttrList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord quality)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordAttrList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_quality.png\")\n plt.show()\n\n return { \"avg_total_loss\" : avg_total_loss, \n \"avg_loss_chord\" : avg_loss_chord, \n \"avg_loss_emotion\": avg_loss_emotion, \n \"avg_acc\" : avg_acc, \n \"avg_cor\" : avg_cor, \n \"avg_acc_cor\" : avg_acc_cor, \n \"avg_h1\" : avg_h1, \n \"avg_h3\" : avg_h3,\n \"avg_h5\" : avg_h5 }" } ]
import os
import csv
import shutil
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets
from model.music_transformer import MusicTransformer
from model.video_music_transformer import VideoMusicTransformer
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model_vevo import train_epoch, eval_model
from torch.utils.tensorboard import SummaryWriter
12,950
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"]

BASELINE_EPOCH = -1

version = VERSION
split_ver = SPLIT_VER
split_path = "split_" + split_ver

VIS_MODELS_ARR = [ "2d/clip_l14p" ]

# main
def main( vm = "" , isPrintArgs = True ):
    args = parse_train_args()

    if isPrintArgs:
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"]

BASELINE_EPOCH = -1

version = VERSION
split_ver = SPLIT_VER
split_path = "split_" + split_ver

VIS_MODELS_ARR = [ "2d/clip_l14p" ]

# main
def main( vm = "" , isPrintArgs = True ):
    args = parse_train_args()

    if isPrintArgs:
print_train_args(args)
10
2023-10-13 09:06:24+00:00
16k
RobotLocomotion/gcs-science-robotics
reproduction/bimanual/helpers.py
[ { "identifier": "BezierGCS", "path": "gcs/bezier.py", "snippet": "class BezierGCS(BaseGCS):\n def __init__(self, regions, order, continuity, edges=None, hdot_min=1e-6, full_dim_overlap=False):\n BaseGCS.__init__(self, regions)\n\n self.order = order\n self.continuity = continuity\n assert continuity < order\n\n A_time = np.vstack((np.eye(order + 1), -np.eye(order + 1),\n np.eye(order, order + 1) - np.eye(order, order + 1, 1)))\n b_time = np.concatenate((1e3*np.ones(order + 1), np.zeros(order + 1), -hdot_min * np.ones(order)))\n self.time_scaling_set = HPolyhedron(A_time, b_time)\n\n for i, r in enumerate(self.regions):\n self.gcs.AddVertex(\n r.CartesianPower(order + 1).CartesianProduct(self.time_scaling_set),\n name = self.names[i] if not self.names is None else '')\n\n # Formulate edge costs and constraints\n u_control = MakeMatrixContinuousVariable(\n self.dimension, order + 1, \"xu\")\n v_control = MakeMatrixContinuousVariable(\n self.dimension, order + 1, \"xv\")\n u_duration = MakeVectorContinuousVariable(order + 1, \"Tu\")\n v_duration = MakeVectorContinuousVariable(order + 1, \"Tv\")\n\n self.u_vars = np.concatenate((u_control.flatten(\"F\"), u_duration))\n self.u_r_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n u_control)\n self.u_h_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n np.expand_dims(u_duration, 0))\n\n edge_vars = np.concatenate((u_control.flatten(\"F\"), u_duration, v_control.flatten(\"F\"), v_duration))\n v_r_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n v_control)\n v_h_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n np.expand_dims(v_duration, 0))\n\n # Continuity constraints\n self.contin_constraints = []\n for deriv in range(continuity + 1):\n u_path_deriv = self.u_r_trajectory.MakeDerivative(deriv)\n v_path_deriv = v_r_trajectory.MakeDerivative(deriv)\n path_continuity_error = v_path_deriv.control_points()[0] - u_path_deriv.control_points()[-1]\n self.contin_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(path_continuity_error, edge_vars),\n np.zeros(self.dimension)))\n\n u_time_deriv = self.u_h_trajectory.MakeDerivative(deriv)\n v_time_deriv = v_h_trajectory.MakeDerivative(deriv)\n time_continuity_error = v_time_deriv.control_points()[0] - u_time_deriv.control_points()[-1]\n self.contin_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(time_continuity_error, edge_vars), 0.0))\n\n self.deriv_constraints = []\n self.edge_costs = []\n\n # Add edges to graph and apply costs/constraints\n if edges is None:\n if full_dim_overlap:\n edges = self.findEdgesViaFullDimensionOverlaps()\n else:\n edges = self.findEdgesViaOverlaps()\n\n vertices = self.gcs.Vertices()\n for ii, jj in edges:\n u = vertices[ii]\n v = vertices[jj]\n edge = self.gcs.AddEdge(u, v, f\"({u.name()}, {v.name()})\")\n\n for c_con in self.contin_constraints:\n edge.AddConstraint(Binding[Constraint](\n c_con, np.append(u.x(), v.x())))\n\n def addTimeCost(self, weight):\n assert isinstance(weight, float) or isinstance(weight, int)\n\n u_time_control = self.u_h_trajectory.control_points()\n segment_time = u_time_control[-1] - u_time_control[0]\n time_cost = LinearCost(\n weight * 
DecomposeLinearExpressions(segment_time, self.u_vars)[0], 0.)\n self.edge_costs.append(time_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](time_cost, edge.xu()))\n\n def addPathLengthCost(self, weight):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n for ii in range(len(u_path_control)):\n H = DecomposeLinearExpressions(u_path_control[ii] / self.order, self.u_vars)\n path_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(path_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](path_cost, edge.xu()))\n\n def addPathLengthIntegralCost(self, weight, integration_points=100):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n s_points = np.linspace(0., 1., integration_points + 1)\n u_path_deriv = self.u_r_trajectory.MakeDerivative(1)\n\n if u_path_deriv.basis().order() == 1:\n for t in [0.0, 1.0]:\n q_ds = u_path_deriv.value(t)\n costs = []\n for ii in range(self.dimension):\n costs.append(q_ds[ii])\n H = DecomposeLinearExpressions(costs, self.u_vars)\n integral_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(integral_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](integral_cost, edge.xu()))\n else:\n q_ds = u_path_deriv.vector_values(s_points)\n for ii in range(integration_points + 1):\n costs = []\n for jj in range(self.dimension):\n if ii == 0 or ii == integration_points:\n costs.append(0.5 * 1./integration_points * q_ds[jj, ii])\n else:\n costs.append(1./integration_points * q_ds[jj, ii])\n H = DecomposeLinearExpressions(costs, self.u_vars)\n integral_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(integral_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](integral_cost, edge.xu()))\n\n def addPathEnergyCost(self, weight):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n for ii in range(len(u_path_control)):\n A_ctrl = DecomposeLinearExpressions(u_path_control[ii], self.u_vars)\n b_ctrl = DecomposeLinearExpressions(u_time_control[ii], self.u_vars)\n H = np.vstack(((self.order) * b_ctrl, np.matmul(np.sqrt(weight_matrix), A_ctrl)))\n energy_cost = PerspectiveQuadraticCost(H, np.zeros(H.shape[0]))\n self.edge_costs.append(energy_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](energy_cost, edge.xu()))\n\n def addDerivativeRegularization(self, weight_r, weight_h, order):\n\n assert isinstance(order, int) and 2 <= order <= self.order\n weights = [weight_r, weight_h]\n for weight in weights:\n assert isinstance(weight, float) or isinstance(weight, int)\n\n trajectories = [self.u_r_trajectory, 
self.u_h_trajectory]\n for traj, weight in zip(trajectories, weights):\n derivative_control = traj.MakeDerivative(order).control_points()\n for c in derivative_control:\n A_ctrl = DecomposeLinearExpressions(c, self.u_vars)\n H = A_ctrl.T.dot(A_ctrl) * 2 * weight / (1 + self.order - order)\n reg_cost = QuadraticCost(H, np.zeros(H.shape[0]), 0)\n self.edge_costs.append(reg_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](reg_cost, edge.xu()))\n\n def addVelocityLimits(self, lower_bound, upper_bound):\n assert len(lower_bound) == self.dimension\n assert len(upper_bound) == self.dimension\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n lb = np.expand_dims(lower_bound, 1)\n ub = np.expand_dims(upper_bound, 1)\n\n for ii in range(len(u_path_control)):\n A_ctrl = DecomposeLinearExpressions(u_path_control[ii], self.u_vars)\n b_ctrl = DecomposeLinearExpressions(u_time_control[ii], self.u_vars)\n A_constraint = np.vstack((A_ctrl - ub * b_ctrl, -A_ctrl + lb * b_ctrl))\n velocity_con = LinearConstraint(\n A_constraint, -np.inf*np.ones(2*self.dimension), np.zeros(2*self.dimension))\n self.deriv_constraints.append(velocity_con)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddConstraint(Binding[Constraint](velocity_con, edge.xu()))\n\n def addSourceTarget(self, source, target, edges=None, velocity=None, zero_deriv_boundary=None):\n source_edges, target_edges = super().addSourceTarget(source, target, edges)\n\n if velocity is not None:\n assert velocity.shape == (2, self.dimension)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n initial_velocity_error = np.squeeze(u_path_control[0]) - velocity[0] * np.squeeze(u_time_control[0])\n final_velocity_error = np.squeeze(u_path_control[-1]) - velocity[1] * np.squeeze(u_time_control[-1])\n initial_velocity_con = LinearEqualityConstraint(\n DecomposeLinearExpressions(initial_velocity_error, self.u_vars),\n np.zeros(self.dimension))\n final_velocity_con = LinearEqualityConstraint(\n DecomposeLinearExpressions(final_velocity_error, self.u_vars),\n np.zeros(self.dimension))\n\n if zero_deriv_boundary is not None:\n assert self.order > zero_deriv_boundary + 1\n initial_constraints = []\n final_constraints = []\n\n for deriv in range(1, zero_deriv_boundary+1):\n u_path_control = self.u_r_trajectory.MakeDerivative(deriv).control_points()\n initial_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(np.squeeze(u_path_control[0]), self.u_vars),\n np.zeros(self.dimension)))\n final_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(np.squeeze(u_path_control[-1]), self.u_vars),\n np.zeros(self.dimension)))\n\n for edge in source_edges:\n for jj in range(self.dimension):\n edge.AddConstraint(edge.xu()[jj] == edge.xv()[jj])\n\n if velocity is not None:\n edge.AddConstraint(Binding[Constraint](initial_velocity_con, edge.xv()))\n if zero_deriv_boundary is not None:\n for i_con in initial_constraints:\n edge.AddConstraint(Binding[Constraint](i_con, edge.xv()))\n\n edge.AddConstraint(edge.xv()[-(self.order + 1)] == 0.)\n\n for edge in target_edges: \n for jj in range(self.dimension):\n edge.AddConstraint(\n edge.xu()[-(self.dimension + self.order + 1) + jj] == edge.xv()[jj])\n\n if velocity is not None:\n 
edge.AddConstraint(Binding[Constraint](final_velocity_con, edge.xu()))\n if zero_deriv_boundary is not None:\n for f_con in final_constraints:\n edge.AddConstraint(Binding[Constraint](f_con, edge.xu()))\n\n for cost in self.edge_costs:\n edge.AddCost(Binding[Cost](cost, edge.xu()))\n\n for d_con in self.deriv_constraints:\n edge.AddConstraint(Binding[Constraint](d_con, edge.xu()))\n\n\n def SolvePath(self, rounding=False, verbose=False, preprocessing=False):\n best_path, best_result, results_dict = self.solveGCS(\n rounding, preprocessing, verbose)\n\n if best_path is None:\n return None, results_dict\n\n # Extract trajectory control points\n knots = np.zeros(self.order + 1)\n path_control_points = []\n time_control_points = []\n for edge in best_path:\n if edge.v() == self.target:\n knots = np.concatenate((knots, [knots[-1]]))\n path_control_points.append(best_result.GetSolution(edge.xv()))\n time_control_points.append(np.array([best_result.GetSolution(edge.xu())[-1]]))\n break\n edge_time = knots[-1] + 1.\n knots = np.concatenate((knots, np.full(self.order, edge_time)))\n edge_path_points = np.reshape(best_result.GetSolution(edge.xv())[:-(self.order + 1)],\n (self.dimension, self.order + 1), \"F\")\n edge_time_points = best_result.GetSolution(edge.xv())[-(self.order + 1):]\n for ii in range(self.order):\n path_control_points.append(edge_path_points[:, ii])\n time_control_points.append(np.array([edge_time_points[ii]]))\n\n offset = time_control_points[0].copy()\n for ii in range(len(time_control_points)):\n time_control_points[ii] -= offset\n\n path_control_points = np.array(path_control_points).T\n time_control_points = np.array(time_control_points).T\n\n path = BsplineTrajectory(BsplineBasis(self.order + 1, knots), path_control_points)\n time_traj = BsplineTrajectory(BsplineBasis(self.order + 1, knots), time_control_points)\n\n return BezierTrajectory(path, time_traj), results_dict" }, { "identifier": "LinearGCS", "path": "gcs/linear.py", "snippet": "class LinearGCS(BaseGCS):\n def __init__(self, regions, edges=None, path_weights=None, full_dim_overlap=False):\n BaseGCS.__init__(self, regions)\n\n if path_weights is None:\n path_weights = np.ones(self.dimension)\n elif isinstance(path_weights, float) or isinstance(path_weights, int):\n path_weights = path_weights * np.ones(self.dimension)\n assert len(path_weights) == self.dimension\n\n self.edge_cost = L2NormCost(\n np.hstack((np.diag(-path_weights), np.diag(path_weights))),\n np.zeros(self.dimension))\n\n for i, r in enumerate(self.regions):\n self.gcs.AddVertex(r, name = self.names[i] if not self.names is None else '')\n\n if edges is None:\n if full_dim_overlap:\n edges = self.findEdgesViaFullDimensionOverlaps()\n else:\n edges = self.findEdgesViaOverlaps()\n\n vertices = self.gcs.Vertices()\n for ii, jj in edges:\n u = vertices[ii]\n v = vertices[jj]\n edge = self.gcs.AddEdge(u, v, f\"({u.name()}, {v.name()})\")\n\n edge_length = edge.AddCost(Binding[Cost](\n self.edge_cost, np.append(u.x(), v.x())))[1]\n\n # Constrain point in v to be in u\n edge.AddConstraint(Binding[Constraint](\n LinearConstraint(u.set().A(),\n -np.inf*np.ones(len(u.set().b())),\n u.set().b()),\n v.x()))\n\n def addSourceTarget(self, source, target, edges=None):\n source_edges, target_edges = super().addSourceTarget(source, target, edges)\n\n for edge in source_edges:\n for jj in range(self.dimension):\n edge.AddConstraint(edge.xu()[jj] == edge.xv()[jj])\n\n for edge in target_edges:\n edge.AddCost(Binding[Cost](\n self.edge_cost, np.append(edge.xu(), 
edge.xv())))\n\n\n def SolvePath(self, rounding=False, verbose=False, preprocessing=False):\n best_path, best_result, results_dict = self.solveGCS(\n rounding, preprocessing, verbose)\n\n if best_path is None:\n return None, results_dict\n\n # Extract trajectory\n waypoints = np.empty((self.dimension, 0))\n for edge in best_path:\n new_waypoint = best_result.GetSolution(edge.xv())\n waypoints = np.concatenate(\n [waypoints, np.expand_dims(new_waypoint, 1)], axis=1)\n\n return waypoints, results_dict" }, { "identifier": "set_transparency_of_models", "path": "reproduction/prm_comparison/helpers.py", "snippet": "def set_transparency_of_models(plant, model_instances, alpha, scene_graph):\n \"\"\"Sets the transparency of the given models.\"\"\"\n inspector = scene_graph.model_inspector()\n for model in model_instances:\n for body_id in plant.GetBodyIndices(model):\n frame_id = plant.GetBodyFrameIdOrThrow(body_id)\n for geometry_id in inspector.GetGeometries(frame_id,\n Role.kIllustration):\n properties = inspector.GetIllustrationProperties(geometry_id)\n phong = properties.GetProperty(\"phong\", \"diffuse\")\n phong.set(phong.r(), phong.g(), phong.b(), alpha)\n properties.UpdateProperty(\"phong\", \"diffuse\", phong)\n scene_graph.AssignRole(plant.get_source_id(), geometry_id,\n properties, RoleAssign.kReplace)" } ]
import numpy as np import os import time from copy import copy from pydrake.common import FindResourceOrThrow from pydrake.geometry import ( CollisionFilterDeclaration, GeometrySet, MeshcatVisualizer, Rgba, Role, SceneGraph ) from pydrake.math import RigidTransform, RollPitchYaw, RotationMatrix from pydrake.multibody.inverse_kinematics import InverseKinematics from pydrake.multibody.parsing import LoadModelDirectives, Parser, ProcessModelDirectives from pydrake.multibody.plant import AddMultibodyPlantSceneGraph, MultibodyPlant from pydrake.perception import PointCloud from pydrake.solvers import MosekSolver, Solve from pydrake.systems.analysis import Simulator from pydrake.systems.framework import DiagramBuilder, LeafSystem from pydrake.systems.primitives import TrajectorySource from pydrake.systems.rendering import MultibodyPositionToGeometryPose from gcs.bezier import BezierGCS from gcs.linear import LinearGCS from gcs.rounding import * from reproduction.prm_comparison.helpers import set_transparency_of_models from reproduction.util import *
12,024
def filterCollsionGeometry(scene_graph, context): filter_manager = scene_graph.collision_filter_manager(context) inspector = scene_graph.model_inspector() iiwa1 = [[], [], [], [], [], [], [], []] iiwa2 = [[], [], [], [], [], [], [], []] wsg1 = [] wsg2 = [] shelf = [] bins = [[], []] table = [] for gid in inspector.GetGeometryIds( GeometrySet(inspector.GetAllGeometryIds()), Role.kProximity): gid_name = inspector.GetName(inspector.GetFrameId(gid)) if "iiwa_1::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa1[int(link_num)].append(gid) elif "iiwa_2::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa2[int(link_num)].append(gid) elif "wsg_1" in gid_name: wsg1.append(gid) elif "wsg_2" in gid_name: wsg2.append(gid) elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = 
np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0
def getIkSeeds(): return { "top_shelf/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "top_shelf/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "top_shelf/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_1/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_1/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_1/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_2/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_2/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_2/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_2/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "bin_R/top_shelf": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "bin_R/shelf_1": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "bin_R/shelf_2": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "bin_R/bin_L": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "top_shelf/shelf_1_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.65])), "top_shelf/shelf_2_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.4])), "shelf_2_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.3), [0.7, 0.15, 0.65])), 
"cross_table/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi), [0.4, 0.4, 0.2]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9])), "shelf_2_cross/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.35, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.15, 0.9])), } def getConfigurationSeeds(): return { "top_shelf/top_shelf": [0.37080011, 0.41394084, -0.16861973, -0.70789778, -0.37031516, 0.60412162, 0.39982981, -0.37080019, 0.41394089, 0.16861988, -0.70789766, 0.37031506, 0.60412179, -0.39982996], "top_shelf/shelf_1": [0.37080079, 0.41394132, -0.16862043, -0.70789679, -0.37031656, 0.60412327, 0.39982969, -0.93496924, 0.46342534, 0.92801666, -1.45777635, -0.31061724, -0.0657716, -0.06019899], "top_shelf/shelf_2": [0.37086448, 0.41394538, -0.16875166, -0.70789745, -0.37020563, 0.60411217, 0.399785, -0.4416204 , 0.62965228, 0.20598405, -1.73324339, -0.41354372, -0.68738414, 0.17443976], "top_shelf/bin_L": [0.37081989, 0.41394235, -0.16866012, -0.70789737, -0.37028201, 0.60411923, 0.39981634, -0.89837331, -1.1576151 , 1.75505216, -1.37515153, 1.0676443 , 1.56371166, -0.64126346], "shelf_1/top_shelf": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, -0.37080079, 0.41394132, 0.16862043, -0.70789679, 0.37031656, 0.60412327, -0.39982969], "shelf_1/shelf_1": [0.87224109, 0.43096634, -0.82223436, -1.45840049, 0.73813452, -0.08999384, -0.41624203, -0.87556489, 0.43246906, 0.82766047, -1.45838515, -0.72259842, -0.0884963, 0.39840129], "shelf_1/shelf_2": [0.93496866, 0.463425 , -0.92801564, -1.45777634, 0.3106235, -0.06577172, 0.06019173, -0.44158858, 0.62964838, 0.20594112, -1.73324341, -0.41354987, -0.6873923 , 0.17446778], "shelf_1/bin_L": [0.93496918, 0.46342531, -0.92801656, -1.45777637, 0.31061728, -0.06577167, 0.06019927, -0.89837321, -1.15761746, 1.75504915, -1.37515113, 1.06764716, 1.56371454, -0.64126383], "shelf_2/top_shelf": [0.4416204, 0.62965228, -0.20598405, -1.73324339, 0.41354372, -0.68738414, -0.17443976, -0.37086448, 0.41394538, 0.16875166, -0.70789745, 0.37020563, 0.60411217, -0.399785], "shelf_2/shelf_1": [0.44158858, 0.62964838, -0.20594112, -1.73324341, 0.41354987, -0.6873923, -0.17446778, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "shelf_2/shelf_2": [0.44161313, 0.62965141, -0.20597435, -1.73324346, 0.41354447, -0.68738613, -0.17444557, -0.4416132 , 0.62965142, 0.20597452, -1.73324348, -0.41354416, -0.68738609, 0.17444625], "shelf_2/bin_L": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, -1.37292761, -0.68372976, 2.96705973, -1.41521783, 2.96705973, -1.11343251, -3.0140737 ], "bin_R/top_shelf": [0.81207926, -1.25359738, -1.58098625, -1.5155474 , -1.32223687, 1.50549708, -2.38221725, -0.37085114, 0.4139444 , 0.16872443, -0.70789757, 0.37022786, 0.60411401, -0.39979449], "bin_R/shelf_1": [0.81207923, -1.25358454, -1.58100042, -1.51554769, -1.32222337, 1.50548369, -2.3822204 , -0.9349716 , 0.46342674, 0.92802082, -1.45777624, -0.31059455, -0.0657707 , -0.06022391], "bin_R/shelf_2": [0.81207937, -1.25360462, -1.58097816, -1.51554761, -1.32224557, 1.50550485, -2.38221483, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "bin_R/bin_L": [-1.73637519, 0.6209681 , 0.24232887, -1.51538355, -0.17977474, 0.92618894, -3.01360257, 1.31861497, 0.72394333, 0.4044295 , -1.37509496, -0.27461997, 1.20038493, 0.18611701], "neutral/neutral": 
[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "neutral/shelf_1": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "neutral/shelf_2": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "shelf_1/neutral": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2/neutral": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2_cross/top_shelf_cross": [0.47500706, 0.72909874, 0.01397772, -1.52841372, 0.15392366, -0.591641, -0.12870521, -0.48821156, 0.67762534, 0.02049926, -0.27420758, 0.10620709, 0.72215209, -0.09973172], } # Additional seed points not needed to connect the graph # "neutral/shelf_1_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.35486829, -0.10621117, -0.09276445, -1.94995786, 1.88826556, 0.46922151, -1.98267349], # "neutral/shelf_2_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.3078069 , 0.56765359, -0.86829439, -2.0943951 , 2.53950045, 1.09607546, -2.4169564], # "shelf_1_extract/neutral": [-1.05527083, -0.43710629, 1.15648812, -1.95011062, 0.24422131, -0.07820216, 0.15872416, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "shelf_2_extract/neutral": [-0.30739053, 0.5673891 , 0.86772198, -2.0943951 , -2.53946773, 1.09586777, 2.41729532, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "cross_table/top_shelf_cross": [ 0.04655887, 0.97997658, 0.52004246, -1.91926412, -1.37518707, -0.88823968, 0.07674699, -0.5921624 , 0.83651867, 0.20513136, -0.00257881, 0.51748756, 0.92012332, -0.51686487], def getDemoConfigurations(): return [ [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], [0.69312848, 0.36303784, -0.66625368, -1.49515991, 0.3230085, -0.10942887, -0.09496304, -0.69312891, 0.36303794, 0.66625426, -1.49515975, -0.32300928, -0.10942832, 0.0949629], [0.2014604, 0.66463495, 0.16799372, -1.66212763, -0.09131682, -0.64368844, -0.03645568, -0.38777291, 0.56141139, -0.05760515, -0.47447495, 0.06515541, 0.63627899, -0.02552148], [-1.8487163 , 0.71749397, 0.66464618, -1.4912954 , -0.52882233, 1.0096015 , -2.62844995, 1.43620829, 0.70451542, -0.01532988, -1.34999693, -0.00550105, 1.18684923, -0.14400234], ] def generateDemoConfigurations(plant, context, wsg1_id, wsg2_id): demo_q = [[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0]] initial_guess = copy(demo_q[0]) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.10, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.40, 0.65]), initial_guess, (0.01, 0.01))) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.25, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.20, 0.9]), initial_guess, None)) initial_guess[0] = -np.pi/2 initial_guess[7] = np.pi/2 demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.09, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0.09, 1.1, 0.3]), initial_guess, None)) return demo_q def filterCollsionGeometry(scene_graph, context): filter_manager = scene_graph.collision_filter_manager(context) inspector = scene_graph.model_inspector() iiwa1 = [[], [], [], [], [], [], [], []] iiwa2 = [[], [], [], [], [], [], [], []] wsg1 = 
[] wsg2 = [] shelf = [] bins = [[], []] table = [] for gid in inspector.GetGeometryIds( GeometrySet(inspector.GetAllGeometryIds()), Role.kProximity): gid_name = inspector.GetName(inspector.GetFrameId(gid)) if "iiwa_1::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa1[int(link_num)].append(gid) elif "iiwa_2::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa2[int(link_num)].append(gid) elif "wsg_1" in gid_name: wsg1.append(gid) elif "wsg_2" in gid_name: wsg2.append(gid) elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? 
def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0
gcs = LinearGCS(regions)
1
2023-10-13 00:27:32+00:00
16k
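The record above ends here (next_line "gcs = LinearGCS(regions)", gold_snippet_index 1, level 16k). As a minimal sketch of how such a record could be consumed for next-line prediction, the following Python is an illustration only, not part of the dataset: the helper names build_prompt and is_exact_match are hypothetical, the field names follow the schema shown earlier in this dump, and the string values are abbreviated stand-ins for the full fields of the record above.

def build_prompt(record):
    # Hypothetical helper: join the flattened fields into one completion prompt.
    # Field names ("import_statement", "cropped_code") follow the schema of this dump.
    return record["import_statement"] + "\n" + record["cropped_code"]

def is_exact_match(generated, record):
    # Compare the first non-empty generated line against the gold "next_line".
    for line in generated.splitlines():
        if line.strip():
            return line.strip() == record["next_line"].strip()
    return False

# Abbreviated stand-in for the record above (full field values omitted here).
record = {
    "import_statement": "import numpy as np ...",
    "cropped_code": "... def getLinearGcsPath(regions, sequence): ...",
    "next_line": "gcs = LinearGCS(regions)",
}
prompt = build_prompt(record)
print(is_exact_match("gcs = LinearGCS(regions)\n", record))  # prints: True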
LeapLabTHU/Rank-DETR
projects/pnp_detr/configs/models/pnp_detr_r50.py
[ { "identifier": "BasicStem", "path": "detrex/modeling/backbone/resnet.py", "snippet": "class BasicStem(CNNBlockBase):\n \"\"\"\n The standard ResNet stem (layers before the first residual block),\n with a conv, relu and max_pool.\n\n Args:\n norm (str or callable): norm after the first conv layer.\n See :func:`detectron2.layers.get_norm` for supported format.\n \"\"\"\n\n def __init__(self, in_channels=3, out_channels=64, norm=\"BN\"):\n super().__init__(in_channels, out_channels, 4)\n self.in_channels = in_channels\n self.conv1 = Conv2d(\n in_channels,\n out_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n weight_init.c2_msra_fill(self.conv1)\n\n def forward(self, x):\n \"\"\"Forward function of `BasicStem`.\"\"\"\n x = self.conv1(x)\n x = F.relu_(x)\n x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)\n return x" }, { "identifier": "ResNet", "path": "detrex/modeling/backbone/resnet.py", "snippet": "class ResNet(Backbone):\n \"\"\"\n Implement paper `Deep Residual Learning for Image Recognition\n <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n stem (nn.Module): a stem module.\n stages (list[list[detectron2.layers.CNNBlockBase]]): several (typically 4) stages,\n each contains multiple :class:`detectron2.layers.CNNBlockBase`.\n num_classes (None or int): if None, will not perform classification.\n Otherwise, will create a linear layer.\n out_features (list[str]): name of the layers whose outputs should\n be returned in forward. Can be anything in \"stem\", \"linear\", or \"res2\" ...\n If None, will return the output of the last layer.\n freeze_at (int): The number of stages at the beginning to freeze.\n see :meth:`freeze` for detailed explanation.\n \"\"\"\n\n def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):\n super().__init__()\n self.stem = stem\n self.num_classes = num_classes\n\n current_stride = self.stem.stride\n self._out_feature_strides = {\"stem\": current_stride}\n self._out_feature_channels = {\"stem\": self.stem.out_channels}\n\n self.stage_names, self.stages = [], []\n\n if out_features is not None:\n # Avoid keeping unused layers in this module. 
They consume extra memory\n # and may cause allreduce to fail\n num_stages = max(\n [{\"res2\": 1, \"res3\": 2, \"res4\": 3, \"res5\": 4}.get(f, 0) for f in out_features]\n )\n stages = stages[:num_stages]\n for i, blocks in enumerate(stages):\n assert len(blocks) > 0, len(blocks)\n for block in blocks:\n assert isinstance(block, CNNBlockBase), block\n\n name = \"res\" + str(i + 2)\n stage = nn.Sequential(*blocks)\n\n self.add_module(name, stage)\n self.stage_names.append(name)\n self.stages.append(stage)\n\n self._out_feature_strides[name] = current_stride = int(\n current_stride * np.prod([k.stride for k in blocks])\n )\n self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels\n self.stage_names = tuple(self.stage_names) # Make it static for scripting\n\n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(curr_channels, num_classes)\n\n # Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"The 1000-way fully-connected layer is initialized by\n # drawing weights from a zero-mean Gaussian with standard deviation of 0.01.\"\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n children = [x[0] for x in self.named_children()]\n for out_feature in self._out_features:\n assert out_feature in children, \"Available children: {}\".format(\", \".join(children))\n self.freeze(freeze_at)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.\n\n Returns:\n dict[str->Tensor]: names and the corresponding features\n \"\"\"\n assert x.dim() == 4, f\"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!\"\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n for name, stage in zip(self.stage_names, self.stages):\n x = stage(x)\n if name in self._out_features:\n outputs[name] = x\n if self.num_classes is not None:\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n def output_shape(self):\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }\n\n def freeze(self, freeze_at=0):\n \"\"\"\n Freeze the first several stages of the ResNet. Commonly used in\n fine-tuning.\n Layers that produce the same feature map spatial size are defined as one\n \"stage\" by paper `Feature Pyramid Networks for Object Detection\n <https://arxiv.org/pdf/1612.03144.pdf>`_.\n\n Args:\n freeze_at (int): number of stages to freeze.\n `1` means freezing the stem. `2` means freezing the stem and\n one residual stage, etc.\n\n Returns:\n nn.Module: this ResNet itself\n \"\"\"\n if freeze_at >= 1:\n self.stem.freeze()\n for idx, stage in enumerate(self.stages, start=2):\n if freeze_at >= idx:\n for block in stage.children():\n block.freeze()\n return self\n\n @staticmethod\n def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):\n \"\"\"\n Create a list of blocks of the same type that forms one ResNet stage.\n\n Args:\n block_class (type): a subclass of ``detectron2.layers.CNNBlockBase`` that's\n used to create all blocks in this stage. 
A module of this type\n must not change spatial resolution of inputs unless its stride != 1.\n num_blocks (int): number of blocks in this stage\n in_channels (int): input channels of the entire stage.\n out_channels (int): output channels of **every block** in the stage.\n kwargs: other arguments passed to the constructor of\n `block_class`. If the argument name is \"xx_per_block\", the\n argument is a list of values to be passed to each block in the\n stage. Otherwise, the same argument is passed to every block\n in the stage.\n\n Returns:\n list[detectron2.layers.CNNBlockBase]: a list of block module.\n\n Examples:\n ::\n stage = ResNet.make_stage(\n BottleneckBlock, 3, in_channels=16, out_channels=64,\n bottleneck_channels=16, num_groups=1,\n stride_per_block=[2, 1, 1],\n dilations_per_block=[1, 1, 2]\n )\n\n Usually, layers that produce the same feature map spatial size are defined as one\n \"stage\" (in paper `Feature Pyramid Networks for Object Detection\n <https://arxiv.org/pdf/1612.03144.pdf>`_).\n Under such definition, ``stride_per_block[1:]`` should all be 1.\n \"\"\"\n blocks = []\n for i in range(num_blocks):\n curr_kwargs = {}\n for k, v in kwargs.items():\n if k.endswith(\"_per_block\"):\n assert len(v) == num_blocks, (\n f\"Argument '{k}' of make_stage should have the \"\n f\"same length as num_blocks={num_blocks}.\"\n )\n newk = k[: -len(\"_per_block\")]\n assert newk not in kwargs, f\"Cannot call make_stage with both {k} and {newk}!\"\n curr_kwargs[newk] = v[i]\n else:\n curr_kwargs[k] = v\n\n blocks.append(\n block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)\n )\n in_channels = out_channels\n return blocks\n\n @staticmethod\n def make_default_stages(depth, block_class=None, **kwargs):\n \"\"\"\n Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).\n If it doesn't create the ResNet variant you need, please use :meth:`make_stage`\n instead for fine-grained customization.\n\n Args:\n depth (int): depth of ResNet\n block_class (type): the CNN block class. Has to accept\n `bottleneck_channels` argument for depth > 50.\n By default it is BasicBlock or BottleneckBlock, based on the\n depth.\n kwargs:\n other arguments to pass to `make_stage`. Should not contain\n stride and channels, as they are predefined for each depth.\n\n Returns:\n list[list[detectron2.layers.CNNBlockBase]]: modules in all stages; see arguments of\n :class:`ResNet`.\n \"\"\"\n num_blocks_per_stage = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n }[depth]\n if block_class is None:\n block_class = BasicBlock if depth < 50 else BottleneckBlock\n if depth < 50:\n in_channels = [64, 64, 128, 256]\n out_channels = [64, 128, 256, 512]\n else:\n in_channels = [64, 256, 512, 1024]\n out_channels = [256, 512, 1024, 2048]\n ret = []\n for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):\n if depth >= 50:\n kwargs[\"bottleneck_channels\"] = o // 4\n ret.append(\n ResNet.make_stage(\n block_class=block_class,\n num_blocks=n,\n stride_per_block=[s] + [1] * (n - 1),\n in_channels=i,\n out_channels=o,\n **kwargs,\n )\n )\n return ret" }, { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. 
Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "SetCriterion", "path": "detrex/modeling/criterion/criterion.py", "snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the loss for Conditional DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n losses: List[str] = [\"class\", \"boxes\"],\n eos_coef: float = 0.1,\n loss_class_type: str = \"focal_loss\",\n alpha: float = 0.25,\n 
gamma: float = 2.0,\n ):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n focal_alpha: alpha in Focal Loss\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.losses = losses\n self.alpha = alpha\n self.gamma = gamma\n self.eos_coef = eos_coef\n self.loss_class_type = loss_class_type\n assert loss_class_type in [\n \"ce_loss\",\n \"focal_loss\",\n ], \"only support ce loss and focal loss for computing classification loss\"\n\n if self.loss_class_type == \"ce_loss\":\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n def loss_labels(self, outputs, targets, indices, num_boxes):\n \"\"\"Classification loss (Binary focal loss)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"]\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2],\n self.num_classes,\n dtype=torch.int64,\n device=src_logits.device,\n )\n target_classes[idx] = target_classes_o\n\n # Computation classification loss\n if self.loss_class_type == \"ce_loss\":\n loss_class = F.cross_entropy(\n src_logits.transpose(1, 2), target_classes, self.empty_weight\n )\n elif self.loss_class_type == \"focal_loss\":\n # src_logits: (b, num_queries, num_classes) = (2, 300, 80)\n # target_classes_one_hot = (2, 300, 80)\n target_classes_onehot = torch.zeros(\n [src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype,\n layout=src_logits.layout,\n device=src_logits.device,\n )\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n loss_class = (\n sigmoid_focal_loss(\n src_logits,\n target_classes_onehot,\n num_boxes=num_boxes,\n alpha=self.alpha,\n gamma=self.gamma,\n )\n * src_logits.shape[1]\n )\n\n losses = {\"loss_class\": loss_class}\n\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n \"\"\"\n assert \"pred_boxes\" in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs[\"pred_boxes\"][idx]\n target_boxes = torch.cat([t[\"boxes\"][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction=\"none\")\n\n losses = {}\n losses[\"loss_bbox\"] = loss_bbox.sum() / num_boxes\n\n loss_giou = 1 - torch.diag(\n generalized_box_iou(\n box_cxcywh_to_xyxy(src_boxes),\n box_cxcywh_to_xyxy(target_boxes),\n )\n )\n losses[\"loss_giou\"] = loss_giou.sum() / num_boxes\n\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = 
torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n \"class\": self.loss_labels,\n \"boxes\": self.loss_boxes,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets, return_indices=False):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n\n return_indices: used for vis. if True, the layer0-5 indices will be returned as well.\n\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n if return_indices:\n indices0_copy = indices\n indices_list = []\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n if return_indices:\n indices_list.append(indices)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n if return_indices:\n indices_list.append(indices0_copy)\n return losses, indices_list\n\n return losses\n\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"loss_class_type: {}\".format(self.loss_class_type),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"focal loss alpha: {}\".format(self.alpha),\n \"focal loss gamma: {}\".format(self.gamma),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats 
(int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "PnPDETR", "path": "projects/pnp_detr/modeling/detr.py", "snippet": "class PnPDETR(nn.Module):\n \"\"\"Implement DETR in `End-to-End Object Detection with Transformers\n <https://arxiv.org/abs/2005.12872>`_\n\n Args:\n backbone (nn.Module): Backbone module for feature extraction.\n in_features (List[str]): Selected backbone output features for transformer module.\n in_channels (int): Dimension of the last feature in `in_features`.\n position_embedding (nn.Module): Position encoding layer for generating position embeddings.\n transformer (nn.Module): Transformer module used for further processing features\n and input queries.\n embed_dim (int): Hidden dimension for transformer module.\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total 
losses.\n aux_loss (bool): Whether to calculate auxiliary loss in criterion. Default: True.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n device (str): Training device. Default: \"cuda\".\n \"\"\"\n\n def __init__(\n self,\n backbone: nn.Module,\n in_features: List[str],\n in_channels: int,\n position_embedding: nn.Module,\n transformer: nn.Module,\n embed_dim: int,\n num_classes: int,\n num_queries: int,\n test_time_sample_ratio: float,\n criterion: nn.Module,\n aux_loss: bool = True,\n pixel_mean: List[float] = [123.675, 116.280, 103.530],\n pixel_std: List[float] = [58.395, 57.120, 57.375],\n device: str = \"cuda\",\n ):\n super().__init__()\n # define backbone and position embedding module\n self.backbone = backbone\n self.in_features = in_features\n self.position_embedding = position_embedding\n\n # project the backbone output feature\n # into the required dim for transformer block\n self.input_proj = nn.Conv2d(in_channels, embed_dim, kernel_size=1)\n\n # define learnable object queries and transformer module\n self.transformer = transformer\n self.query_embed = nn.Embedding(num_queries, embed_dim)\n\n # define classification head and box head\n self.class_embed = nn.Linear(embed_dim, num_classes + 1)\n self.bbox_embed = MLP(input_dim=embed_dim, hidden_dim=embed_dim, output_dim=4, num_layers=3)\n self.num_classes = num_classes\n\n # test time sampling ratio\n self.test_time_sample_ratio = test_time_sample_ratio\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n\n def forward(self, batched_inputs):\n \"\"\"Forward function of `DAB-DETR` which excepts a list of dict as inputs.\n\n Args:\n batched_inputs (List[dict]): A list of instance dict, and each dict must consists of:\n - dict[\"image\"] (torch.Tensor): The unnormalized image tensor.\n - dict[\"height\"] (int): The original image height.\n - dict[\"width\"] (int): The original image width.\n - dict[\"instance\"] (detectron2.structures.Instances):\n Image meta informations and ground truth boxes and labels during training.\n Please refer to\n https://detectron2.readthedocs.io/en/latest/modules/structures.html#detectron2.structures.Instances\n for the basic usage of Instances.\n\n Returns:\n dict: Returns a dict with the following elements:\n - dict[\"pred_logits\"]: the classification logits for all queries.\n with shape ``[batch_size, num_queries, num_classes]``\n - dict[\"pred_boxes\"]: The normalized boxes coordinates for all queries in format\n ``(x, y, w, h)``. These values are normalized in [0, 1] relative to the size of\n each individual image (disregarding possible padding). See PostProcess for information\n on how to retrieve the unnormalized bounding box.\n - dict[\"aux_outputs\"]: Optional, only returned when auxilary losses are activated. 
It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n\n # only use last level feature in DETR\n features = self.backbone(images.tensor)[self.in_features[-1]]\n features = self.input_proj(features)\n img_masks = F.interpolate(img_masks[None], size=features.shape[-2:]).to(torch.bool)[0]\n pos_embed = self.position_embedding(img_masks)\n\n hidden_states, sample_reg_loss = self.transformer(features, img_masks, self.query_embed.weight, pos_embed, self.test_time_sample_ratio)\n\n outputs_class = self.class_embed(hidden_states)\n outputs_coord = self.bbox_embed(hidden_states).sigmoid()\n output = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(outputs_class, outputs_coord)\n output[\"sample_reg_loss\"] = sample_reg_loss\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n loss_dict[\"sample_reg_loss\"] = output[\"sample_reg_loss\"]\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"Inference function for DETR\n\n Args:\n box_cls (torch.Tensor): tensor of shape ``(batch_size, num_queries, K)``.\n The tensor predicts the classification probability for each query.\n box_pred (torch.Tensor): tensors of shape ``(batch_size, num_queries, 4)``.\n The tensor predicts 4-vector ``(x, y, w, h)`` box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # For each box we assign the best class or the second best if the best on is `no_object`.\n scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)\n\n for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(\n zip(scores, labels, box_pred, image_sizes)\n ):\n result = Instances(image_size)\n result.pred_boxes = 
Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets\n\n def preprocess_image(self, batched_inputs):\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images" }, { "identifier": "PnPDetrTransformerEncoder", "path": "projects/pnp_detr/modeling/transformer.py", "snippet": "class PnPDetrTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n attn_dropout: float = 0.1,\n feedforward_dim: int = 2048,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = True,\n batch_first: bool = False,\n ):\n super(PnPDetrTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=batch_first,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(\n normalized_shape=embed_dim,\n ),\n operation_order=(\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "PnPDetrTransformerDecoder", "path": "projects/pnp_detr/modeling/transformer.py", "snippet": "class PnPDetrTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n attn_dropout: float = 0.1,\n feedforward_dim: int = 2048,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = True,\n return_intermediate: bool = True,\n batch_first: bool = False,\n ):\n super(PnPDetrTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=batch_first,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(\n normalized_shape=embed_dim,\n ),\n operation_order=(\"self_attn\", \"norm\", \"cross_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n self.embed_dim = self.layers[0].embed_dim\n\n if post_norm:\n 
self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n if not self.return_intermediate:\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)[None]\n return query\n\n # return intermediate\n intermediate = []\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.return_intermediate:\n if self.post_norm_layer is not None:\n intermediate.append(self.post_norm_layer(query))\n else:\n intermediate.append(query)\n\n return torch.stack(intermediate)" }, { "identifier": "PnPDetrTransformer", "path": "projects/pnp_detr/modeling/transformer.py", "snippet": "class PnPDetrTransformer(nn.Module):\n def __init__(\n self, \n encoder=None, \n decoder=None,\n sample_topk_ratio=1/3.,\n score_pred_net='2layer-fc-256',\n kproj_net='2layer-fc',\n unsample_abstract_number=30,\n pos_embed_kproj=False,\n ):\n super(PnPDetrTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.embed_dim = self.encoder.embed_dim\n\n self.sampler = SortSampler(\n sample_topk_ratio, \n self.embed_dim, \n score_pred_net=score_pred_net, \n kproj_net=kproj_net, \n unsample_abstract_number=unsample_abstract_number, \n pos_embed_kproj=pos_embed_kproj\n )\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, x, mask, query_embed, pos_embed, sample_ratio):\n bs, c, h, w = x.shape\n pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)\n query_embed = query_embed.unsqueeze(1).repeat(\n 1, bs, 1\n ) # [num_query, dim] -> [num_query, bs, dim]\n mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w]\n x, sample_reg_loss, sort_confidence_topk, mask, pos_embed = self.sampler(x, mask, pos_embed, sample_ratio)\n memory = self.encoder(\n query=x,\n key=None,\n value=None,\n query_pos=pos_embed,\n query_key_padding_mask=mask,\n )\n target = torch.zeros_like(query_embed)\n decoder_output = self.decoder(\n query=target,\n key=memory,\n value=memory,\n key_pos=pos_embed,\n query_pos=query_embed,\n key_padding_mask=mask,\n )\n decoder_output = decoder_output.transpose(1, 2)\n return decoder_output, sample_reg_loss" } ]
from detectron2.config import LazyCall as L
from detrex.modeling.backbone import ResNet, BasicStem
from detrex.modeling.matcher import HungarianMatcher
from detrex.modeling.criterion.criterion import SetCriterion
from detrex.layers.position_embedding import PositionEmbeddingSine
from projects.pnp_detr.modeling import (
    PnPDETR,
    PnPDetrTransformer,
    PnPDetrTransformerEncoder,
    PnPDetrTransformerDecoder,
)
11,842
model = L(PnPDETR)(
    backbone=L(ResNet)(
        stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
        stages=L(ResNet.make_default_stages)(
            depth=50,
            stride_in_1x1=False,
            norm="FrozenBN",
        ),
        out_features=["res2", "res3", "res4", "res5"],
        freeze_at=1,
    ),
    in_features=["res5"],
    in_channels=2048,
    position_embedding=L(PositionEmbeddingSine)(
        num_pos_feats=128,
        temperature=10000,
        normalize=True,
    ),
model = L(PnPDETR)(
    backbone=L(ResNet)(
        stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
        stages=L(ResNet.make_default_stages)(
            depth=50,
            stride_in_1x1=False,
            norm="FrozenBN",
        ),
        out_features=["res2", "res3", "res4", "res5"],
        freeze_at=1,
    ),
    in_features=["res5"],
    in_channels=2048,
    position_embedding=L(PositionEmbeddingSine)(
        num_pos_feats=128,
        temperature=10000,
        normalize=True,
    ),
transformer=L(PnPDetrTransformer)(
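The cropped config above stops exactly where the gold next line opens the transformer entry; it is declared with detectron2's LazyCall mechanism. A minimal, self-contained sketch of that pattern, assuming detectron2 and torch are installed (the Linear layer below is only an illustration, not part of the record):

import torch.nn as nn
from detectron2.config import LazyCall as L, instantiate

# Declare a call lazily, then materialize it. The record's `model = L(PnPDETR)(...)`
# tree is materialized the same way once the config is complete.
lazy_layer = L(nn.Linear)(in_features=2048, out_features=256)
layer = instantiate(lazy_layer)  # -> nn.Linear(in_features=2048, out_features=256)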
8
2023-10-12 03:02:25+00:00
16k
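For reference, the position-embedding contract documented in this record's context snippet can be shape-checked directly. A minimal sketch, assuming detrex is installed and its PositionEmbeddingSine behaves as the snippet above shows (zero mask entries mark valid positions):

import torch
from detrex.layers.position_embedding import PositionEmbeddingSine

# Same hyperparameters as the config above; an all-zero (bs, h, w) mask should yield
# an embedding of shape (bs, num_pos_feats * 2, h, w), as stated in the docstring.
pos_layer = PositionEmbeddingSine(num_pos_feats=128, temperature=10000, normalize=True)
mask = torch.zeros(2, 32, 32, dtype=torch.bool)
pos = pos_layer(mask)
assert pos.shape == (2, 256, 32, 32)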
ByungKwanLee/Full-Segment-Anything
mask_generator.py
[ { "identifier": "Sam", "path": "modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n \n\n # Batch Individual Mask Generation by LBK\n @torch.no_grad()\n def individual_forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n is_low_resol: bool = False,\n ) -> List[Dict[str, torch.Tensor]]:\n \n input_images = torch.stack([self.lbk_preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n refined_mask_outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Progressing Intergraion.. 
by LBK\n refined_masks = self.postprocess_small_regions(low_res_masks, iou_predictions, *input_images.shape[2:], is_low_resol)\n if not is_low_resol:\n refined_masks = F.interpolate(\n refined_masks.unsqueeze(1).float(),\n input_images.shape[2:],\n mode=\"bilinear\",\n align_corners=False,\n ).squeeze(1).bool()\n refined_mask_outputs.append(refined_masks)\n \n return refined_mask_outputs\n \n # PostProcess by LBK EDIT\n def postprocess_small_regions(self, masks, iou_predictions, orig_h, orig_w, is_low_resol):\n\n\n \"\"\"\n Configuration\n \"\"\"\n # pred_iou_thresh = 0.85\n # stability_score_offset = 1.0\n # stability_score_thresh = 0.85\n # box_nms_thresh = 0.7\n\n\n pred_iou_thresh = 0.7\n stability_score_offset = 1.0\n stability_score_thresh = 0.7\n box_nms_thresh = 0.7\n\n # Interpolation\n if not is_low_resol:\n masks = F.interpolate(\n masks,\n (orig_h, orig_w),\n mode=\"bilinear\",\n align_corners=False,\n )\n else:\n orig_h, orig_w = masks.shape[2:]\n\n # Serialize predictions and store in MaskData\n data = MaskData(\n masks=masks.flatten(0, 1),\n iou_preds=iou_predictions.flatten(0, 1), \n )\n\n # Filter by predicted IoU\n if pred_iou_thresh > 0.0:\n keep_mask = data[\"iou_preds\"] > pred_iou_thresh\n data.filter(keep_mask)\n\n # Calculate stability score\n data[\"stability_score\"] = calculate_stability_score(\n data[\"masks\"], self.mask_threshold, stability_score_offset\n )\n if stability_score_thresh > 0.0:\n keep_mask = data[\"stability_score\"] >= stability_score_thresh\n data.filter(keep_mask)\n\n # Threshold masks and calculate boxes\n data[\"masks\"] = data[\"masks\"] > self.mask_threshold\n data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n # Filter boxes that touch crop boundaries\n keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h])\n if not torch.all(keep_mask):\n data.filter(keep_mask)\n data['masks'] = uncrop_masks(data[\"masks\"], [0, 0, orig_w, orig_h], orig_h, orig_w)\n\n # Remove duplicates within this crop.\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n data[\"iou_preds\"],\n torch.zeros_like(data[\"boxes\"][:, 0]), # categories\n iou_threshold=box_nms_thresh,\n )\n data.filter(keep_by_nms)\n\n # making masks\n return data['masks']\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n \n # by lbk edit\n def lbk_preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n return x" }, { "identifier": "SamPredictor", "path": "predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, 
np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "utils/amg.py", "snippet": "def 
calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 
2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
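The RLE helpers in the context above (mask_to_rle_pytorch and rle_to_mask) are easiest to understand as a round trip. A small sketch, assuming this repository's utils.amg package is importable:

import torch
from utils.amg import mask_to_rle_pytorch, rle_to_mask

# Encode a batch of boolean masks to uncompressed RLE, then decode the first one back.
masks = torch.zeros(1, 4, 4, dtype=torch.bool)
masks[0, 1:3, 1:3] = True
rles = mask_to_rle_pytorch(masks)       # [{"size": [4, 4], "counts": [...]}]
recovered = rle_to_mask(rles[0])        # np.ndarray of shape (4, 4), dtype bool
assert (torch.from_numpy(recovered) == masks[0]).all()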
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
from typing import Any, Dict, List, Optional, Tuple

from modeling import Sam
from predictor import SamPredictor
from utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
10,848
""" Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." 
if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle":
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle":
mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
9
2023-10-13 20:07:42+00:00
16k
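Putting this record's pieces together, the SamMaskGenerator defined in mask_generator.py is driven as below. This is a usage sketch only: it assumes a Sam instance has already been built from the repository's modeling package (checkpoint loading is not part of the snippets shown), and "example.jpg" is a placeholder path.

import cv2
from mask_generator import SamMaskGenerator

sam = ...  # assumed: a Sam model constructed/loaded elsewhere (not shown in the record)
generator = SamMaskGenerator(model=sam, points_per_side=32, output_mode="binary_mask")

# generate() expects an HWC uint8 image and returns one dict per mask with the keys
# documented in the class docstring above.
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
for record in generator.generate(image):
    print(record["area"], record["bbox"], record["predicted_iou"], record["stability_score"])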
sakemin/cog-musicgen-remixer
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
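The ChromaExtractor snippet in the context list above pads short inputs up to nfft, computes a power spectrogram, projects it through librosa chroma filter banks, and optionally one-hot encodes the argmax bin per frame. The sketch below shows a standalone call under assumed values (32 kHz audio, random input); ChromaExtractor here is the class from the snippet, not a new import.

import torch

# Sketch only: ChromaExtractor is the nn.Module defined in the context snippet
# (audiocraft/modules/chroma.py); the sample rate and waveform are placeholders.
extractor = ChromaExtractor(sample_rate=32000, n_chroma=12, radix2_exp=12, argmax=True)

wav = torch.randn(2, 1, 32000 * 4)  # [B, C, T]: two 4-second mono waveforms
chroma = extractor(wav)             # [B, T', 12] one-hot chroma frames (argmax=True)
print(chroma.shape)                 # T' follows the 1024-sample hop (winlen // 4)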
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer  # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap  # type: ignore
13735
if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
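In the cropped code above, _extract_chroma_chunk converts a waveform seek time into chroma-frame indices via _downsampling_factor(), which is the chroma hop size. The short sketch below walks through that arithmetic with assumed values (32 kHz audio and the default 2**12 // 4 = 1024-sample hop); it is illustrative only.

# Assumed values for illustration; _downsampling_factor() returns chroma.winhop.
sample_rate = 32000
winhop = 1024
frame_rate = sample_rate / winhop            # 31.25 chroma frames per second

seek_time = 10.0                             # x.seek_time[idx], in seconds
wav_length = 30 * sample_rate                # a 30-second conditioning wav

index = int(frame_rate * seek_time)                         # 312
target_length = int(frame_rate * wav_length / sample_rate)  # 937
print(frame_rate, index, target_length)      # frames sliced from full_chroma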
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
warn_once(logger, "Using precomputed evaluation wavs!")
15
2023-10-09 09:55:24+00:00
16k
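The conditioner code in the row above zeroes out padded frames by converting per-sample lengths (divided by the embedding model's downsampling factor) into a boolean mask via length_to_mask. As a minimal sketch of that masking step, assuming a simple length_to_mask helper (the helper and the example shapes here are illustrative, not the library's actual implementation):

import torch

def length_to_mask(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
    # lengths: [B] number of valid frames per sample -> [B, max_len] boolean mask
    steps = torch.arange(max_len, device=lengths.device)
    return steps[None, :] < lengths[:, None]

# Example: waveform lengths downsampled by a factor of 4, as in WaveformConditioner.forward
lengths = torch.tensor([16.0, 8.0, 12.0])
frame_lengths = lengths / 4                              # lengths / self._downsampling_factor()
mask = length_to_mask(frame_lengths, max_len=4).int()    # [3, 4]
embeds = torch.randn(3, 4, 8)                            # [B, T, dim] dummy projected embeddings
embeds = embeds * mask.unsqueeze(-1)                     # padded positions are zeroed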
Texaser/MTN
nerf/network.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.train_step = 0\n self.max_train_step = 6000\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.dmtet:\n # load dmtet vertices\n tets = np.load('tets/{}_tets.npz'.format(self.opt.tet_grid_size))\n self.verts = - torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * 2 # covers [-1, 1]\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.tet_scale = torch.tensor([1, 1, 1], dtype=torch.float32, device='cuda')\n self.dmtet = DMTet('cuda')\n\n # vert sdf and deform\n sdf = torch.nn.Parameter(torch.zeros_like(self.verts[..., 0]), requires_grad=True)\n self.register_parameter('sdf', sdf)\n deform = torch.nn.Parameter(torch.zeros_like(self.verts), requires_grad=True)\n self.register_parameter('deform', deform)\n\n edges = torch.tensor([0,1, 0,2, 0,3, 1,2, 1,3, 2,3], dtype=torch.long, device=\"cuda\") # six edges for each tetrahedron.\n all_edges = self.indices[:,edges].reshape(-1,2) # [M * 6, 2]\n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n \n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(batch_size=4096) # TODO: hard encoded batch 
size\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n \n d = (x ** 2).sum(-1)\n \n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n\n if self.opt.dmtet:\n\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n vertices, triangles = self.dmtet(self.verts + deform, sdf, self.indices)\n\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n \n # TODO: use a larger thresh to extract a surface mesh from the density field, but this value is very empirical...\n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n \n sigmas = np.zeros([resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n print(f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.01)\n \n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, triangles = decimate_mesh(vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous().float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # 
[N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n print(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n \n print(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n \n print(f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n') \n\n print(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n 
fp.write(f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only 
forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1, 3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n normal_image = torch.sum(weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, 
fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n self.train_step += 1\n # print(self.train_epoch)\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n # bg_color = 1\n # bg_color = 1e-3\n if shading == 'normal':\n bg_color = 1\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n 
results['weights_sum'] = weights_sum\n \n return results\n\n @torch.no_grad()\n def init_tet(self, mesh=None):\n\n if mesh is not None:\n # normalize mesh\n scale = 0.8 / np.array(mesh.bounds[1] - mesh.bounds[0]).max()\n center = np.array(mesh.bounds[1] + mesh.bounds[0]) / 2\n mesh.vertices = (mesh.vertices - center) * scale\n\n # init scale\n # self.tet_scale = torch.from_numpy(np.abs(mesh.vertices).max(axis=0) + 1e-1).to(self.verts.dtype).cuda()\n self.tet_scale = torch.from_numpy(np.array([np.abs(mesh.vertices).max()]) + 1e-1).to(self.verts.dtype).cuda()\n self.verts = self.verts * self.tet_scale\n\n # init sdf\n import cubvh\n BVH = cubvh.cuBVH(mesh.vertices, mesh.faces)\n sdf, _, _ = BVH.signed_distance(self.verts, return_uvw=False, mode='watertight')\n sdf *= -10 # INNER is POSITIVE, also make it stronger\n self.sdf.data += sdf.to(self.sdf.data.dtype).clamp(-1, 1)\n\n else:\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n \n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n\n # init scale\n sigma = self.density(self.verts)['sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.verts[mask]\n self.tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.verts = self.verts * self.tet_scale\n\n # init sigma\n sigma = self.density(self.verts)['sigma'] # new verts\n self.sdf.data += (sigma - density_thresh).clamp(-1, 1)\n\n print(f'[INFO] init dmtet: scale = {self.tet_scale}')\n\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n # get mesh\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n verts, faces = self.dmtet(self.verts + deform, sdf, self.indices)\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha = (rast[..., 3:] > 0).float()\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (rast[..., 3:] > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = 
self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n # these two modes lead to no parameters to optimize if using --lock_geo.\n if self.opt.lock_geo and shading in ['textureless', 'normal']:\n shading = 'lambertian'\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n if self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0:\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['normal_loss'] = normal_consistency(face_normals, faces)\n if self.opt.lambda_mesh_laplacian > 0:\n results['lap_loss'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... 
but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 
0]\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = 
results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'hashgrid_taichi':\n from taichi_modules.hash_encoder import HashEncoderTaichi\n encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size\n\n elif encoding == 'multiscale_triplane':\n from gridencoder import MultiScaleTriplane\n # encoder = MiniTriplane(input_dim=input_dim)\n encoder = MultiScaleTriplane(input_dim=input_dim)\n\n elif encoding == 'multiscale_triplane_pooling':\n from gridencoder import MultiScaleTriplane_Pooling\n encoder = MultiScaleTriplane_Pooling(input_dim=input_dim)\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from activation import trunc_exp
from .renderer import NeRFRenderer
from encoding import get_encoder
from .utils import safe_normalize
13,514
# TODO: not sure about the details...
class ResBlock(nn.Module):
    def __init__(self, dim_in, dim_out, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias)
        self.norm = nn.LayerNorm(self.dim_out)
        self.activation = nn.SiLU(inplace=True)

        if self.dim_in != self.dim_out:
            self.skip = nn.Linear(self.dim_in, self.dim_out, bias=False)
        else:
            self.skip = None

    def forward(self, x):
        # x: [B, C]
        identity = x

        out = self.dense(x)
        out = self.norm(out)

        if self.skip is not None:
            identity = self.skip(identity)

        out += identity
        out = self.activation(out)

        return out


class BasicBlock(nn.Module):
    def __init__(self, dim_in, dim_out, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        # x: [B, C]
        out = self.dense(x)
        out = self.activation(out)

        return out


class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True, block=BasicBlock):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            if l == 0:
                net.append(BasicBlock(self.dim_in, self.dim_hidden, bias=bias))
            elif l != num_layers - 1:
                net.append(block(self.dim_hidden, self.dim_hidden, bias=bias))
            else:
                net.append(nn.Linear(self.dim_hidden, self.dim_out, bias=bias))

        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)

        return x
# TODO: not sure about the details...
class ResBlock(nn.Module):
    def __init__(self, dim_in, dim_out, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias)
        self.norm = nn.LayerNorm(self.dim_out)
        self.activation = nn.SiLU(inplace=True)

        if self.dim_in != self.dim_out:
            self.skip = nn.Linear(self.dim_in, self.dim_out, bias=False)
        else:
            self.skip = None

    def forward(self, x):
        # x: [B, C]
        identity = x

        out = self.dense(x)
        out = self.norm(out)

        if self.skip is not None:
            identity = self.skip(identity)

        out += identity
        out = self.activation(out)

        return out


class BasicBlock(nn.Module):
    def __init__(self, dim_in, dim_out, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        # x: [B, C]
        out = self.dense(x)
        out = self.activation(out)

        return out


class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True, block=BasicBlock):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            if l == 0:
                net.append(BasicBlock(self.dim_in, self.dim_hidden, bias=bias))
            elif l != num_layers - 1:
                net.append(block(self.dim_hidden, self.dim_hidden, bias=bias))
            else:
                net.append(nn.Linear(self.dim_hidden, self.dim_out, bias=bias))

        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)

        return x
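For reference, a small usage sketch of the MLP/ResBlock modules defined in this row (it assumes the classes above are in scope and that torch is imported as in the row's import statement; the dimensions are illustrative):

mlp = MLP(dim_in=32, dim_out=4, dim_hidden=64, num_layers=3, block=ResBlock)
x = torch.randn(8, 32)   # [B, C]
y = mlp(x)               # [8, 4]: BasicBlock(32->64) -> ResBlock(64->64) -> Linear(64->4)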
class NeRFNetwork(NeRFRenderer):
1
2023-10-11 04:06:20+00:00
16k
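The run() method in the row above composites ray samples with the standard volume-rendering weights: per-step opacity alphas = 1 - exp(-deltas * sigmas), attenuated by the accumulated transmittance along the ray. A minimal standalone sketch of that weight computation and color compositing (tensor shapes are illustrative; the repo's actual inputs come from its ray-marching code):

import torch

def composite_weights(sigmas: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
    # sigmas, deltas: [N, T] densities and step sizes along each of N rays
    alphas = 1.0 - torch.exp(-deltas * sigmas)                 # per-step opacity
    alphas_shifted = torch.cat(
        [torch.ones_like(alphas[..., :1]), 1.0 - alphas + 1e-15], dim=-1)
    trans = torch.cumprod(alphas_shifted, dim=-1)[..., :-1]    # accumulated transmittance
    return alphas * trans                                      # [N, T] compositing weights

# Example: one ray, four uniform steps
sigmas = torch.tensor([[0.0, 1.0, 5.0, 0.5]])
deltas = torch.full((1, 4), 0.1)
weights = composite_weights(sigmas, deltas)
rgbs = torch.rand(1, 4, 3)
image = (weights.unsqueeze(-1) * rgbs).sum(dim=-2)             # [1, 3] composited color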
oracle/guardian-ai
guardian_ai/fairness/metrics/core.py
[ { "identifier": "EqualizedOddsScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class EqualizedOddsScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n The disparity is measured by comparing the true positive and false positive\n rates on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n False Positive Rate (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Equalized Odds [1] is computed by taking the maximum distance between\n TPR and FPR for a subgroup against the rest of the subgroups.\n\n Perfect score\n A perfect score for this metric means that the model has the same TPR and\n FPR when comparing a subgroup to the rest of the subgroups. For example,\n if the protected attributes are race and sex, then a perfect\n Equalized Odds disparity would mean that all combinations of values for\n race and sex have identical TPR and FPR. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. \"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import EqualizedOddsScorer\n scorer = EqualizedOddsScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=equalized_odds,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ErrorRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ErrorRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the error rate on\n instances of a subgroup against the rest of the subgroups.\n\n Error Rate (also known as inaccuracy) is calculated as\n (FP + FN) / N, where FP and FN are the number of false positives and\n false negatives, respectively, while N is the total Number of\n instances.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect error rate disparity would\n mean that all combinations of values for race and sex have identical\n error rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import ErrorRateScorer\n scorer = ErrorRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=error_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseDiscoveryRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseDiscoveryRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n discovery rate on instances of a subgroup against the rest of the\n subgroups.\n\n False Discovery Rate (also known as FDR) is calculated as\n FP / (FP + TP), where FP and TP are the number of false positives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false discovery rate disparity\n would mean that all combinations of values for race and sex have identical\n false discovery rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseDiscoveryRateScorer\n scorer = FalseDiscoveryRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_discovery_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseNegativeRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseNegativeRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n negative rate on instances of a subgroup against the rest of the subgroups.\n\n False Negative Rate [1] (also known as FNR or miss rate) is calculated as\n FN / (FN + TP), where FN and TP are the number of false negatives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false negative rate disparity\n would mean that all combinations of values for race and sex have identical\n false negative rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseNegativeRateScorer\n scorer = FalseNegativeRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_negative_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseOmissionRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseOmissionRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n omission rate on instances of a subgroup against the rest of the subgroups.\n\n False Omission Rate (also known as FOR) is calculated as\n FN / (FN + TN), where FN and TN are the number of false negatives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false omission rate disparity\n would mean that all combinations of values for race and sex have identical\n false omission rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseOmissionRateScorer\n scorer = FalseOmissionRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_omission_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalsePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalsePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n positive rate on instances of a subgroup against the rest of the subgroups.\n\n False Positive Rate [1] (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false positive rate disparity\n would mean that all combinations of values for race and sex have identical\n false positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalsePositiveRateScorer\n scorer = FalsePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ModelStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ModelStatisticalParityScorer(_ModelFairnessScorer): # noqa: D412\n \"\"\"\n Measure the statistical parity [1] of a model's output between all subgroup pairs.\n\n Statistical parity (also known as Base Rate or Disparate Impact) states that\n a predictor is unbiased if the prediction is independent of the protected\n attribute.\n\n Statistical Parity is calculated as PP / N, where PP and N are the number of\n Positive Predictions and total Number of predictions made, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not predict\n positively any of the subgroups at a different rate than it does for the\n rest of the subgroups. For example, if the protected attributes are race\n and sex, then a perfect statistical parity would mean that all combinations\n of values for race and sex have identical ratios of positive predictions.\n Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ModelStatisticalParityScorer\n\n scorer = ModelStatisticalParityScorer(['race', 'sex'])\n scorer(model, X, y_true)\n\n This metric does not require `y_true`. It can also be called using\n\n .. 
code-block:: python\n\n scorer(model, X)\n \"\"\" # noqa: D412\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=model_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )\n\n def __call__(\n self,\n model: Any,\n X: pd.DataFrame,\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n supplementary_features: Optional[pd.DataFrame] = None,\n ):\n \"\"\"\n Compute the metric using a model's predictions on a given array\n of instances ``X``.\n\n Parameters\n ----------\n model: Any\n Object that implements a `predict(X)` function to collect\n categorical predictions.\n X : pandas.DataFrame\n Array of instances to compute the metric on.\n y_true : pandas.Series, numpy.ndarray, list, or None, default=None\n Array of groundtruth labels.\n supplementary_features : pandas.DataFrame, or None, default=None\n Array of supplementary features for each instance. Used in case\n one attribute in ``self.protected_attributes`` is not contained by\n ``X`` (e.g. if the protected attribute is not used by the model).\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to ``self.reduction``.\n\n\n Raises\n ------\n GuardianAIValueError\n - if a feature is present in both ``X``\n and ``supplementary_features``.\n\n \"\"\"\n y_pred = model.predict(X)\n\n subgroups = self._get_check_subgroups(X, supplementary_features)\n\n return self.metric(\n y_true, y_pred, subgroups, self.distance_measure, self.reduction\n )" }, { "identifier": "TheilIndexScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TheilIndexScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n Intuitively, the Theil Index can be thought of as a measure of the\n divergence between a subgroup's different error distributions (i.e. false\n positives and false negatives) against the rest of the subgroups.\n\n Perfect score\n The perfect score for this metric is 0, meaning that the model does not\n have a different error distribution for any subgroup when compared to the\n rest of the subgroups. For example, if the protected attributes are\n race and sex, then a perfect Theil Index disparity would mean that all\n combinations of values for race and sex have identical error\n distributions.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TheilIndexScorer\n scorer = TheilIndexScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=theil_index,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=True,\n )" }, { "identifier": "TruePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TruePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive rate between\n all subgroup pairs (also known as equal opportunity).\n\n For each subgroup, the disparity is measured by comparing the true positive\n rate on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate [1] (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n\n Perfect score\n A perfect score for this metric means that the model does not correctly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect true positive rate disparity\n would mean that all combinations of values for race and sex have\n identical true positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. 
\"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TruePositiveRateScorer\n scorer = TruePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=true_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "equalized_odds", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def equalized_odds(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n For more details, refer to :class:`.EqualizedOddsScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import equalized_odds\n subgroups = X[['race', 'sex']]\n equalized_odds(y_true, y_pred, subgroups)\n \"\"\"\n tpr = true_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n\n fpr = false_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n if isinstance(tpr, dict):\n eq_odds = {}\n for key in tpr:\n eq_odds[key] = np.nanmax([tpr[key], fpr[key]])\n else:\n eq_odds = np.nanmax([tpr, fpr])\n\n return eq_odds" }, { "identifier": "error_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def error_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For more details, refer to :class:`.ErrorRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import error_rate\n subgroups = X[['race', 'sex']]\n error_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"error_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_discovery_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_discovery_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseDiscoveryRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_discovery_rate\n subgroups = X[['race', 'sex']]\n false_discovery_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_discovery_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_negative_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_negative_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseNegativeRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import false_negative_rate\n subgroups = X[['race', 'sex']]\n false_negative_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_negative_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_omission_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_omission_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseOmissionRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_omission_rate\n subgroups = X[['race', 'sex']]\n false_omission_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_omission_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalsePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_positive_rate\n subgroups = X[['race', 'sex']]\n false_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "model_statistical_parity", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def model_statistical_parity(\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n y_pred: Optional[Union[pd.Series, np.ndarray, List]] = None,\n subgroups: Optional[pd.DataFrame] = None,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measure the statistical parity of a model's output between all subgroup pairs.\n\n For more details, refer to :class:`.ModelStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list or None, default=None\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list or None, default=None\n Array of model predictions.\n subgroups : pandas.DataFrame or None, default=None\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If Value of None is received for either `y_pred` or `subgroups`.\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import model_statistical_parity\n subgroups = X[['race', 'sex']]\n model_statistical_parity(y_true, y_pred, subgroups)\n\n This metric does not require `y_true`. It can also be called using\n\n .. code-block:: python\n\n model_statistical_parity(None, y_pred, subgroups)\n model_statistical_parity(y_pred=y_pred, subgroups=subgroups)\n \"\"\" # noqa: D412\n\n if y_pred is None or subgroups is None:\n raise GuardianAIValueError(\n \"Value of None was received for either `y_pred` or `subgroups`. \"\n \"This may be due to calling the metric using only 2 positional \"\n \"arguments. 
If this is the case, either call the function by \"\n \"passing ``None`` as the first argument or use named arguments for \"\n \"`y_pred` and `subgroups`.\"\n )\n\n return _model_metric(\n None,\n y_pred,\n subgroups,\n metric=\"selection_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=True,\n allow_distance_measure_none=False,\n )" }, { "identifier": "theil_index", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def theil_index(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n For more details, refer to :class:`.TheilIndexScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If distance_measure values are given to Theil Index.\n\n References\n ----------\n [1]: `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import theil_index\n subgroups = X[['race', 'sex']]\n theil_index(y_true, y_pred, subgroups)\n \"\"\"\n\n if distance_measure is not None and not isinstance(\n distance_measure, _DistanceMetric\n ):\n raise GuardianAIValueError(\n \"Theil Index does not accept distance_measure values. 
It should\"\n \"always be set to ``None``.\"\n )\n\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"between_group_theil_index\",\n distance_measure=None,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=True,\n )" }, { "identifier": "true_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def true_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.TruePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import true_positive_rate\n subgroups = X[['race', 'sex']]\n true_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"true_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "GuardianAIValueError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAIValueError(ValueError, GuardianAIError):\n \"\"\"Exception raised for unexpected values.\"\"\"\n\n pass" } ]
from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAIValueError
13057
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ """Core for fairness metrics""" fairness_scorers_dict = { # noqa N816 "statistical_parity": ModelStatisticalParityScorer, "TPR": TruePositiveRateScorer, "FPR": FalsePositiveRateScorer, "FNR": FalseNegativeRateScorer, "FOR": FalseOmissionRateScorer, "FDR": FalseDiscoveryRateScorer, "error_rate": ErrorRateScorer, "equalized_odds": EqualizedOddsScorer, "theil_index": TheilIndexScorer, } def _get_fairness_scorer(metric, protected_attributes, **kwargs): # noqa N802 if metric not in fairness_scorers_dict: raise GuardianAIValueError( f"{metric} is not a supported model fairness metric. Supported " f"metrics are: {list(fairness_scorers_dict)}." ) return fairness_scorers_dict[metric](protected_attributes, **kwargs) fairness_metrics_dict = { "statistical_parity": model_statistical_parity, "TPR": true_positive_rate, "FPR": false_positive_rate, "FNR": false_negative_rate, "FOR": false_omission_rate, "FDR": false_discovery_rate, "error_rate": error_rate, "equalized_odds": equalized_odds,
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ """Core for fairness metrics""" fairness_scorers_dict = { # noqa N816 "statistical_parity": ModelStatisticalParityScorer, "TPR": TruePositiveRateScorer, "FPR": FalsePositiveRateScorer, "FNR": FalseNegativeRateScorer, "FOR": FalseOmissionRateScorer, "FDR": FalseDiscoveryRateScorer, "error_rate": ErrorRateScorer, "equalized_odds": EqualizedOddsScorer, "theil_index": TheilIndexScorer, } def _get_fairness_scorer(metric, protected_attributes, **kwargs): # noqa N802 if metric not in fairness_scorers_dict: raise GuardianAIValueError( f"{metric} is not a supported model fairness metric. Supported " f"metrics are: {list(fairness_scorers_dict)}." ) return fairness_scorers_dict[metric](protected_attributes, **kwargs) fairness_metrics_dict = { "statistical_parity": model_statistical_parity, "TPR": true_positive_rate, "FPR": false_positive_rate, "FNR": false_negative_rate, "FOR": false_omission_rate, "FDR": false_discovery_rate, "error_rate": error_rate, "equalized_odds": equalized_odds,
"theil_index": theil_index,
16
2023-10-09 09:48:50+00:00
16k
IST-DASLab/SparseFinetuning
llmfoundry/models/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "llmfoundry/models/layers/attention.py", "snippet": "def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool,\n prefix_lm: bool, causal: bool, use_sequence_id: bool):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'{attn_impl=} is an invalid setting.')" }, { "identifier": "build_attn_bias", "path": "llmfoundry/models/layers/attention.py", "snippet": "def build_attn_bias(\n attn_impl: str,\n attn_bias: torch.Tensor,\n n_heads: int,\n seq_len: int,\n causal: bool = False,\n alibi: bool = False,\n alibi_bias_max: int = 8,\n):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n # in place add alibi to attn bias\n device, dtype = attn_bias.device, attn_bias.dtype\n attn_bias = attn_bias.add(\n build_alibi_bias(\n n_heads,\n seq_len,\n full=not causal,\n alibi_bias_max=alibi_bias_max,\n device=device,\n dtype=dtype,\n ))\n return attn_bias\n else:\n raise ValueError(f'{attn_impl=} is an invalid setting.')" }, { "identifier": "MPTBlock", "path": "llmfoundry/models/layers/blocks.py", "snippet": "class MPTBlock(nn.Module):\n\n def __init__(\n self,\n d_model: int,\n n_heads: int,\n expansion_ratio: int,\n attn_config: Optional[Dict] = None,\n ffn_config: Optional[Dict] = None,\n resid_pdrop: float = 0.0,\n norm_type: str = 'low_precision_layernorm',\n verbose: int = 0,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n **kwargs: Any,\n ):\n if attn_config is None:\n attn_config = {\n 'attn_type': 'multihead_attention',\n 'attn_pdrop': 0.0,\n 'attn_impl': 'triton',\n 'qk_ln': False,\n 'clip_qkv': None,\n 'softmax_scale': None,\n 'prefix_lm': False,\n 'attn_uses_sequence_id': False,\n 'alibi': False,\n 'alibi_bias_max': 8,\n }\n\n if ffn_config is None:\n ffn_config = {\n 'ffn_type': 'mptmlp',\n }\n\n del kwargs # unused, just to capture any extra args from the config\n super().__init__()\n\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n assert isinstance(attn_config['attn_type'], str)\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n\n # necessary to avoid passing extraneous args into attn_class while allowing the use of **kwargs\n args_to_exclude_in_attn_class = {\n 'attn_type', 'prefix_lm', 'alibi', 'attn_uses_sequence_id',\n 'alibi_bias_max'\n }\n attn_config_subset_for_attn_class = {\n k: v\n for k, v in attn_config.items()\n if k not in args_to_exclude_in_attn_class\n }\n\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(d_model=d_model,\n n_heads=n_heads,\n fc_type=fc_type,\n verbose=verbose,\n device=device,\n **attn_config_subset_for_attn_class)\n self.norm_2 = None\n if not getattr(FFN_CLASS_REGISTRY[ffn_config['ffn_type']], '_has_norm',\n False):\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = build_ffn(\n d_model=d_model,\n expansion_ratio=expansion_ratio,\n device=device,\n **ffn_config,\n )\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(\n self,\n x: torch.Tensor,\n past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n attn_bias: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.ByteTensor] = None,\n is_causal: bool = True,\n ) 
-> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[\n torch.Tensor, torch.Tensor]]]:\n a = self.norm_1(x)\n b, attn_weights, past_key_value = self.attn(\n a,\n past_key_value=past_key_value,\n attn_bias=attn_bias,\n attention_mask=attention_mask,\n is_causal=is_causal,\n )\n x = x + self.resid_attn_dropout(b)\n m = x\n if self.norm_2 is not None:\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return x, attn_weights, past_key_value" }, { "identifier": "SharedEmbedding", "path": "llmfoundry/models/layers/custom_embedding.py", "snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool = False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)" }, { "identifier": "FC_CLASS_REGISTRY", "path": "llmfoundry/models/layers/fc.py", "snippet": "FC_CLASS_REGISTRY = {\n 'torch': nn.Linear,\n}" }, { "identifier": "FFN_CLASS_REGISTRY", "path": "llmfoundry/models/layers/ffn.py", "snippet": "FFN_CLASS_REGISTRY = {\n 'mptmlp': MPTMLP,\n}" }, { "identifier": "MPTMLP", "path": "llmfoundry/models/layers/ffn.py", "snippet": "class MPTMLP(nn.Module):\n\n def __init__(\n self,\n d_model: int,\n expansion_ratio: int,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n ):\n super().__init__()\n fc_kwargs = {}\n if fc_type != 'te':\n fc_kwargs['device'] = device\n self.up_proj = FC_CLASS_REGISTRY[fc_type](\n d_model,\n expansion_ratio * d_model,\n **fc_kwargs,\n )\n self.act = nn.GELU(approximate='none')\n self.down_proj = FC_CLASS_REGISTRY[fc_type](\n expansion_ratio * d_model,\n d_model,\n **fc_kwargs,\n )\n self.down_proj._is_residual = True # type: ignore\n\n def forward(self, x: torch.Tensor):\n return self.down_proj(self.act(self.up_proj(x)))" }, { "identifier": "build_ffn", "path": "llmfoundry/models/layers/ffn.py", "snippet": "def build_ffn(\n d_model: int,\n expansion_ratio: int,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n **kwargs: Any,\n):\n ffn_type = kwargs.pop('ffn_type')\n if ffn_type == 'mptmlp':\n if len(kwargs) > 0:\n raise ValueError(\n f'MPTMLP got an unexpected keyword argument: {kwargs}')\n return MPTMLP(\n d_model=d_model,\n expansion_ratio=expansion_ratio,\n fc_type=fc_type,\n device=device,\n )\n elif ffn_type == 'te_ln_mlp':\n assert te is not None\n return te.LayerNormMLP(\n hidden_size=d_model,\n ffn_hidden_size=d_model * expansion_ratio,\n **kwargs,\n )\n\n raise ValueError(f'{ffn_type=} not recognized.')" }, { "identifier": "NORM_CLASS_REGISTRY", "path": "llmfoundry/models/layers/norm.py", "snippet": "NORM_CLASS_REGISTRY: Dict[str, Type[torch.nn.Module]] = {\n 'layernorm': torch.nn.LayerNorm,\n 'low_precision_layernorm': LPLayerNorm,\n 'rmsnorm': RMSNorm,\n 'low_precision_rmsnorm': LPRMSNorm,\n}" }, { "identifier": "MPTConfig", "path": "llmfoundry/models/mpt/configuration_mpt.py", "snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(\n self,\n d_model: int = 2048,\n n_heads: int = 16,\n n_layers: int = 24,\n expansion_ratio: int = 4,\n max_seq_len: int = 2048,\n vocab_size: int = 50368,\n resid_pdrop: float = 0.0,\n emb_pdrop: float = 0.0,\n learned_pos_emb: bool = True,\n attn_config: Dict = attn_config_defaults,\n ffn_config: Dict = ffn_config_defaults,\n init_device: str = 'cpu',\n logit_scale: Optional[Union[float, str]] = None,\n no_bias: bool = False,\n verbose: int = 0,\n embedding_fraction: float = 1.0,\n norm_type: str = 'low_precision_layernorm',\n use_cache: bool = False,\n init_config: Dict = 
init_config_defaults,\n fc_type: str = 'torch',\n **kwargs: Any,\n ):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the ffn.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention, grouped_query_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads.\n ffn_config (Dict): A dictionary used to configure the model's ffn module:\n ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. 
These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs.\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.ffn_config = ffn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n self.fc_type = fc_type\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n if self.attn_config.get('alibi', False):\n self.learned_pos_emb = False\n warnings.warn(\n f'alibi is turned on, setting `learned_pos_emb` to `False.`')\n super().__init__(**kwargs)\n\n self._validate_config()\n\n def _set_config_defaults(self, config: Dict[str, Any],\n config_defaults: Dict[str, Any]):\n # set config defaults\n for k, v in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n # set config defaults\n self.attn_config = self._set_config_defaults(\n self.attn_config,\n attn_config_defaults,\n )\n self.ffn_config = self._set_config_defaults(\n self.ffn_config,\n ffn_config_defaults,\n )\n self.init_config = self._set_config_defaults(\n self.init_config,\n init_config_defaults,\n )\n\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any(\n prob < 0 or prob > 1 for prob in\n [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):\n raise ValueError(\n \"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\"\n )\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(\n f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config[\n 'attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError(\n 'prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in [\n 'torch', 'triton'\n ]:\n raise NotImplementedError(\n 'alibi only 
implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config[\n 'attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError(\n 'attn_uses_sequence_id only implemented with torch and triton attention.'\n )\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError(\n 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'\n )\n if isinstance(self.logit_scale,\n str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(\n f\"{self.logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\"\n )\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"{self.init_config=} 'name' needs to be set.\")\n if not self.learned_pos_emb and not self.attn_config['alibi']:\n warnings.warn(\n f'Positional information not being provided to the model using either learned_pos_emb or alibi.'\n )\n if self.fc_type == 'te' or self.ffn_config['ffn_type'] == 'te_ln_mlp':\n try:\n import transformer_engine.pytorch as te\n del te # unused\n except:\n raise ImportError(\n 'TransformerEngine import fail. `fc_type: te` requires TransformerEngine be installed. '\n +\n 'The required version of transformer_engine also requires FlashAttention v1.0.6 is installed:\\n'\n + 'pip install flash-attn==1.0.6 --no-build-isolation \\n' +\n 'pip install git+https://github.com/NVIDIA/TransformerEngine.git@144e4888b2cdd60bd52e706d5b7a79cb9c1a7156'\n )\n if self.ffn_config['ffn_type'] == 'mptmlp':\n self.ffn_config['fc_type'] = self.fc_type\n elif self.ffn_config['ffn_type'] == 'te_ln_mlp':\n self.ffn_config['bias'] = not self.no_bias" }, { "identifier": "AutoTokenizerForMOD", "path": "llmfoundry/models/utils/adapt_tokenizer.py", "snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args: Any, **kwargs: Any):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer" }, { "identifier": "adapt_tokenizer_for_denoising", "path": "llmfoundry/models/utils/adapt_tokenizer.py", "snippet": "def adapt_tokenizer_for_denoising(tokenizer: PreTrainedTokenizerBase):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n # Add sentinel tokens (e.g., <extra_id_0>, <extra_id_1>, and so on). 
Has no effect if these are already in the vocab.\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n\n # If the padding token has not been set, add <pad> and use it\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n\n # Register a property that gets us the ids of the sentinel tokens\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels,\n add_special_tokens=False).input_ids\n\n tokenizer.sentinel_token_ids = _sentinel_token_ids" }, { "identifier": "add_bidirectional_mask_if_missing", "path": "llmfoundry/models/utils/hf_prefixlm_converter.py", "snippet": "def add_bidirectional_mask_if_missing(batch: MutableMapping):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for i, continuation_indices in enumerate(\n batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif ('labels' in batch) and ('attention_mask' in batch):\n batch['bidirectional_mask'] = torch.logical_and(\n torch.eq(batch['attention_mask'], 1),\n torch.eq(batch['labels'], -100),\n ).type_as(batch['attention_mask'])\n else:\n raise KeyError(\n 'No bidirectional_mask in batch and not sure how to construct one.'\n )" }, { "identifier": "convert_hf_causal_lm_to_prefix_lm", "path": "llmfoundry/models/utils/hf_prefixlm_converter.py", "snippet": "def convert_hf_causal_lm_to_prefix_lm(\n model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. 
For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n\n else:\n raise TypeError(\n f'Cannot convert model to Prefix LM. ' +\\\n f'Model does not belong to set of supported HF models:' +\\\n f'\\n{_SUPPORTED_HF_MODELS}'\n )" }, { "identifier": "init_empty_weights", "path": "llmfoundry/models/utils/meta_init_context.py", "snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool = False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. 
To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'),\n include_buffers=include_buffers) as f:\n yield f" }, { "identifier": "generic_param_init_fn_", "path": "llmfoundry/models/utils/param_init_fns.py", "snippet": "def generic_param_init_fn_(\n module: nn.Module,\n init_fn_: Callable,\n n_layers: int,\n d_model: Optional[int] = None,\n init_div_is_residual: Union[int, float, str, bool] = True,\n emb_init_std: Optional[float] = None,\n emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,\n verbose: int = 0,\n **kwargs: Any,\n):\n del kwargs # unused, just to capture any extra args from the config\n if verbose > 1:\n warnings.warn(\n f'If model has bias parameters they are initialized to 0.')\n\n # enable user to divide _is_residual weights by\n # a value which defaults to math.sqrt(2 * cfg.n_layers)\n init_div_is_residual = init_div_is_residual\n\n if init_div_is_residual is False:\n # not used, for pyright\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(\n init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(\n init_div_is_residual, # type: ignore\n str) and init_div_is_residual.isnumeric():\n # do not trust YAML parsing to always convert numbers to numbers\n div_is_residual = float(init_div_is_residual)\n else:\n # not used, for pyright\n div_is_residual = 1.0\n raise ValueError(\n f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}'\n )\n\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(\n f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' +\\\n f'Set `init_div_is_residual: false` in init config to disable this.'\n )\n\n if isinstance(module, tuple(set(FC_CLASS_REGISTRY.values()))):\n # Linear\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n assert isinstance(module.bias, torch.Tensor)\n torch.nn.init.zeros_(module.bias)\n\n if init_div_is_residual is not False and getattr(\n module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual) # type: ignore\n\n elif isinstance(module, nn.Embedding):\n # Embedding\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(\n f'Embedding layer initialized using normal distribution with mean=0 and {std=}.'\n )\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(\n f'Uniform init requires a min and a max limit. User input: {lim}.'\n )\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n a, b = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(\n f'Embedding layer initialized using uniform distribution in range {lim}.'\n )\n else:\n emb_init_fn_ = init_fn_\n\n emb_init_fn_(module.weight)\n\n elif isinstance(module,\n tuple(set(NORM_CLASS_REGISTRY.values()))): # type: ignore\n # Norm\n if verbose > 1:\n warnings.warn(\n f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.'\n )\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight) # type: ignore\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias) # type: ignore\n\n elif isinstance(module, nn.MultiheadAttention):\n # torch's MultiheadAttention\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None\n assert d_model is not None\n # in_proj_weight is actually 3 layers and should be split up for width based init\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for s, e in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n\n # bias\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n\n # out proj\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(\n module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n\n elif te is not None and isinstance(module, te.LayerNormMLP):\n if module.layer_norm_weight is not None:\n torch.nn.init.ones_(module.layer_norm_weight) # type: ignore\n if module.layer_norm_bias is not None:\n torch.nn.init.zeros_(module.layer_norm_bias) # type: ignore\n\n init_fn_(module.fc1_weight)\n if module.fc1_bias is not None:\n assert isinstance(module.fc1_bias, torch.Tensor)\n torch.nn.init.zeros_(module.fc1_bias)\n init_fn_(module.fc2_weight)\n if module.fc2_bias is not None:\n assert isinstance(module.fc2_bias, torch.Tensor)\n torch.nn.init.zeros_(module.fc2_bias)\n\n with torch.no_grad():\n module.fc2_weight.div_(div_is_residual) # type: ignore\n\n else:\n for _ in module.parameters(recurse=False):\n # raise error if uninitialized module has any parameters\n raise NotImplementedError(\n f'{module.__class__.__name__} parameters are not initialized by param_init_fn.'\n )" }, { "identifier": "MODEL_INIT_REGISTRY", "path": "llmfoundry/models/utils/param_init_fns.py", "snippet": "MODEL_INIT_REGISTRY = {\n 'default_': torch_default_param_init_fn_,\n 'baseline_': baseline_param_init_fn_,\n 'kaiming_uniform_': kaiming_uniform_param_init_fn_,\n 'kaiming_normal_': kaiming_normal_param_init_fn_,\n 'neox_init_': neox_param_init_fn_,\n 'small_init_': small_param_init_fn_,\n 'xavier_uniform_': xavier_uniform_param_init_fn_,\n 'xavier_normal_': xavier_normal_param_init_fn_,\n}" } ]
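The `add_bidirectional_mask_if_missing` and `convert_hf_causal_lm_to_prefix_lm` snippets in the context list above both revolve around a `bidirectional_mask` of shape [batch_size, seq_length], where 1 marks prefix tokens (bidirectional attention, no loss) and 0 marks target tokens. A minimal sketch of how that mask falls out of `attention_mask` and the `-100` label convention shown in the snippet; the tensors below are made-up toy values, not taken from the dataset:

import torch

# Toy batch: attention_mask marks real tokens (1) vs padding (0);
# labels of -100 mark the prefix positions that should produce no loss.
attention_mask = torch.tensor([[1, 1, 1, 1, 0],
                               [1, 1, 1, 1, 1]])
labels = torch.tensor([[-100, -100, 7, 9, -100],
                       [-100, 5, 6, 2, 3]])

# Prefix tokens = real tokens whose label is -100; they may attend bidirectionally.
bidirectional_mask = torch.logical_and(
    torch.eq(attention_mask, 1),
    torch.eq(labels, -100),
).type_as(attention_mask)

print(bidirectional_mask)
# tensor([[1, 1, 0, 0, 0],
#         [1, 0, 0, 0, 0]])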
import math import warnings import torch import torch.nn as nn import torch.nn.functional as F from typing import Any, List, Mapping, MutableMapping, Optional, Tuple, Union from composer.metrics import (InContextLearningLMAccuracy, InContextLearningLMExpectedCalibrationError, InContextLearningMCExpectedCalibrationError, InContextLearningMultipleChoiceAccuracy, InContextLearningQAAccuracy) from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity from composer.models import HuggingFaceModel from composer.utils import dist from omegaconf import DictConfig from omegaconf import OmegaConf as om from transformers import PreTrainedModel, PreTrainedTokenizerBase from transformers.modeling_outputs import (BaseModelOutputWithPast, CausalLMOutputWithPast) from llmfoundry.models.layers.attention import attn_bias_shape, build_attn_bias from llmfoundry.models.layers.blocks import MPTBlock from llmfoundry.models.layers.custom_embedding import SharedEmbedding from llmfoundry.models.layers.fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY from llmfoundry.models.layers.ffn import \ FFN_CLASS_REGISTRY as FFN_CLASS_REGISTRY from llmfoundry.models.layers.ffn import MPTMLP as MPTMLP from llmfoundry.models.layers.ffn import build_ffn as build_ffn from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY from llmfoundry.models.mpt.configuration_mpt import MPTConfig from llmfoundry.models.utils.adapt_tokenizer import ( AutoTokenizerForMOD, # type: ignore (see note), adapt_tokenizer_for_denoising, # type: ignore (see note) ) from llmfoundry.models.utils.hf_prefixlm_converter import ( add_bidirectional_mask_if_missing, # type: ignore (see note) convert_hf_causal_lm_to_prefix_lm, # type: ignore (see note) ) from llmfoundry.models.utils.meta_init_context import \ init_empty_weights # type: ignore (see note) from llmfoundry.models.utils.param_init_fns import ( generic_param_init_fn_, # type: ignore (see note) MODEL_INIT_REGISTRY, ) from llmfoundry.models.layers.flash_attn_triton import flash_attn_func as flash_attn_func from flash_attn.losses.cross_entropy import CrossEntropyLoss as FusedCrossEntropyLoss # type: ignore # isort: skip
11923
'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif (self.attn_uses_sequence_id is False) and (sequence_id is not None): warnings.warn( 'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.' ) S = input_ids.size(1) assert ( S <= self.config.max_seq_len ), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) # type: ignore if self.learned_pos_emb: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError( f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network ({len(past_key_values)=}; {self.config.n_layers=}).' ) # For attn_impl: triton and flash the past key tensor spec is (batch, seq, dim). # For attn_impl: torch the past key tensor spec is (batch, heads, head_dim, seq). # Here we shift position embedding using the `seq` dim of the past key past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError( f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.' ) pos = torch.arange( past_position, S + past_position, dtype=torch.long, device=input_ids.device, ).unsqueeze(0) if attention_mask is not None: # adjust the position indices to account for padding tokens pos = torch.clamp( pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0, ) pos_emb = self.wpe(pos) # type: ignore x = tok_emb + pos_emb else: # ALiBi and NoPE use this path (RoPE will also use this path if / when enabled) x = tok_emb if self.embedding_fraction == 1: x = self.emb_drop(x) # type: ignore else: # this implementation is proposed on page 7 of the GLM-130B paper https://arxiv.org/abs/2210.02414 x_shrunk = (x * self.embedding_fraction) + ( x.detach() * (1 - self.embedding_fraction)) assert isinstance(self.emb_drop, nn.Module) # pyright x = self.emb_drop(x_shrunk) attn_bias, attention_mask = self._attn_bias( device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, ) # initialize the past key values cache if it should be used if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers) ] # type: ignore all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for b_idx, block in enumerate(self.blocks): # type: ignore if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) past_key_value = (past_key_values[b_idx] if past_key_values is not None else None) x, attn_weights, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal, ) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None # pyright all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) # type: ignore # add hidden states from the last decoder layer if output_hidden_states: assert all_hidden_states is not 
None # pyright all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name']
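The learned-position branch at the end of the cropped_code above shifts the position ids by the number of tokens already held in the KV cache (`past_position`) and then subtracts the left-padding count so positions stay contiguous. A small self-contained sketch of that index arithmetic; the shapes and values below are invented for illustration:

import torch

S = 3               # number of new tokens in this forward pass
past_position = 4   # tokens already in the KV cache, e.g. past_key_values[0][0].size(1)

# attention_mask covers past + current tokens; the two leading 0s are left padding.
attention_mask = torch.tensor([[0, 0, 1, 1, 1, 1, 1]])           # (batch, past + S)

pos = torch.arange(past_position, S + past_position,
                   dtype=torch.long).unsqueeze(0)                 # [[4, 5, 6]]

# Subtract how many padding tokens precede each position so ids stay contiguous.
pad_so_far = torch.cumsum((~attention_mask.bool()).to(torch.int32),
                          dim=1)[:, past_position:]               # [[2, 2, 2]]
pos = torch.clamp(pos - pad_so_far, min=0)

print(pos)  # tensor([[2, 3, 4]])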
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ # NOTE: All utils are imported directly even if unused so that # HuggingFace can detect all the needed files to copy into its modules folder. # Otherwise, certain modules are missing. # isort: off try: except: pass # isort: on class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] self.learned_pos_emb = config.learned_pos_emb if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError( f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).' ) norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()] # CogView (https://arxiv.org/abs/2105.13290) and GLM-130B (https://arxiv.org/abs/2210.02414) # both report this helping with stabilizing training self.embedding_fraction = config.embedding_fraction self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) if self.learned_pos_emb: self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device) self.emb_drop = nn.Dropout(config.emb_pdrop) self.blocks = nn.ModuleList([ MPTBlock( device=config.init_device, **config.to_dict(), ) for _ in range(config.n_layers) ]) self.norm_f = norm_class(config.d_model, device=config.init_device) if config.init_device != 'meta': print( f'You are using {config.init_device=}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.' 
) self.apply(self.param_init_fn) self.is_causal = not self.prefix_lm # define attn mask self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape( self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id, ) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance( module.bias, nn.Parameter): if config.verbose: warnings.warn( f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) # Print verbose info if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value: nn.Embedding): self.wte = value @torch.no_grad() def _attn_bias( self, device: torch.device, dtype: torch.dtype, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, ): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias( self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max, ) self._attn_bias_initialized = True # flash does not support prefix_lm and will incorporate any # attention_mask inside the attention module if self.attn_impl == 'flash': return self.attn_bias, attention_mask if self.attn_bias is not None: # .to(*args, **kwargs) is a no-op if tensor is already on # specified device or of specificed dtype self.attn_bias = self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias # If using torch or triton, we incorporate the prefix_mask (if appropriate) if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) # pyright assert isinstance(prefix_mask, torch.Tensor) # pyright attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) # If using torch or triton, we incorporate sequence_id (if appropriate) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) # pyright attn_bias = self._apply_sequence_id(attn_bias, sequence_id) # If using torch or triton, we incorporate attention_mask. This will output # None in place of attention_mask since it will not be further needed in the # attention modules. 
if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: # clamp to 0 necessary for torch 2.0 compile() _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and (attention_mask.shape != prefix_mask.shape): raise ValueError( f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill( ~attention_mask.view(-1, 1, 1, s_k), min_val) return attn_bias, None def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): s_k, s_q = attn_bias.shape[-2:] if (s_k != self.config.max_seq_len) or (s_q != self.config.max_seq_len): raise ValueError( 'attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Mix the causal max and the bidirectional mask to get the full # allowable attention (i.e. full = not accounting for padding yet) causal = torch.tril( torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Restrict attention to tokens that share the same value # in sequence_id cannot_attend = torch.logical_not( torch.eq( sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len), )).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.Tensor] = None, ): return_dict = (return_dict if return_dict is not None else self.config.return_dict) use_cache = (use_cache if use_cache is not None else self.config.use_cache) if attention_mask is not None: attention_mask = attention_mask.bool( ) # type: ignore (TODO to figure out the right type here) if prefix_mask is not None: prefix_mask = prefix_mask.bool( ) # type: ignore (TODO to figure out the right type here) # These args are passed in by keyword in huggingface's generate function # https://github.com/huggingface/transformers/blob/68287689f2f0d8b7063c400230b3766987abf18d/src/transformers/generation/utils.py#L2201-L2206 # but have not yet been fully implemented in MPTModel if not return_dict: raise 
NotImplementedError( 'return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError( 'output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.' ) if (attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training): raise NotImplementedError( 'MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError( 'prefix_mask is a required argument when MPT is configured with prefix_lm=True.' ) # Raise a not implemented error if input_embeds is not None (this is an arg in huggingface transformers and we need to support it for PEFT) if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError( 'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif (self.attn_uses_sequence_id is False) and (sequence_id is not None): warnings.warn( 'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.' ) S = input_ids.size(1) assert ( S <= self.config.max_seq_len ), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) # type: ignore if self.learned_pos_emb: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError( f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network ({len(past_key_values)=}; {self.config.n_layers=}).' ) # For attn_impl: triton and flash the past key tensor spec is (batch, seq, dim). # For attn_impl: torch the past key tensor spec is (batch, heads, head_dim, seq). # Here we shift position embedding using the `seq` dim of the past key past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError( f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.' 
) pos = torch.arange( past_position, S + past_position, dtype=torch.long, device=input_ids.device, ).unsqueeze(0) if attention_mask is not None: # adjust the position indices to account for padding tokens pos = torch.clamp( pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0, ) pos_emb = self.wpe(pos) # type: ignore x = tok_emb + pos_emb else: # ALiBi and NoPE use this path (RoPE will also use this path if / when enabled) x = tok_emb if self.embedding_fraction == 1: x = self.emb_drop(x) # type: ignore else: # this implementation is proposed on page 7 of the GLM-130B paper https://arxiv.org/abs/2210.02414 x_shrunk = (x * self.embedding_fraction) + ( x.detach() * (1 - self.embedding_fraction)) assert isinstance(self.emb_drop, nn.Module) # pyright x = self.emb_drop(x_shrunk) attn_bias, attention_mask = self._attn_bias( device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, ) # initialize the past key values cache if it should be used if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers) ] # type: ignore all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for b_idx, block in enumerate(self.blocks): # type: ignore if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) past_key_value = (past_key_values[b_idx] if past_key_values is not None else None) x, attn_weights, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal, ) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None # pyright all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) # type: ignore # add hidden states from the last decoder layer if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name']
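`_apply_sequence_id` in the all_code field above keeps packed sequences from attending to each other: wherever a query and key carry different `sequence_id` values, the additive attention bias is filled with the dtype minimum. A toy reconstruction of that masking step, with invented values and a single attention head:

import torch

seq_len = 4
attn_bias = torch.zeros(1, 1, seq_len, seq_len)   # (batch, heads, query, key) additive bias
sequence_id = torch.tensor([[0, 0, 1, 1]])        # two sequences packed into one row

# A query may only attend to keys that share its sequence_id.
cannot_attend = torch.logical_not(
    torch.eq(sequence_id.view(-1, seq_len, 1),
             sequence_id.view(-1, 1, seq_len)),
).unsqueeze(1)

min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)

print(attn_bias[0, 0])
# Positions 0-1 keep bias 0 toward keys 0-1 only; positions 2-3 toward keys 2-3 only;
# every cross-sequence entry holds the float32 minimum, i.e. it is effectively masked out.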
MODEL_INIT_REGISTRY[init_fn_name](
16
2023-10-09 15:32:15+00:00
16k
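For this record the gold `next_line`, `MODEL_INIT_REGISTRY[init_fn_name](`, is a plain dictionary dispatch: `param_init_fn` looks up an initializer by the name stored in `init_config`, and `self.apply(self.param_init_fn)` in `MPTModel.__init__` runs it over every submodule. A stripped-down sketch of the same pattern; the registry entries and model below are placeholders, not the real llm-foundry implementations:

import torch.nn as nn

# Placeholder initializers keyed by name, mirroring the shape of MODEL_INIT_REGISTRY.
def default_(module: nn.Module, **kwargs) -> None:
    if isinstance(module, nn.Linear):
        nn.init.zeros_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)

def kaiming_normal_(module: nn.Module, **kwargs) -> None:
    if isinstance(module, nn.Linear):
        nn.init.kaiming_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)

MODEL_INIT_REGISTRY = {'default_': default_, 'kaiming_normal_': kaiming_normal_}

class TinyModel(nn.Module):
    def __init__(self, init_config: dict):
        super().__init__()
        self.init_config = init_config
        self.proj = nn.Linear(8, 8)
        self.apply(self.param_init_fn)   # same hook used in MPTModel.__init__

    def param_init_fn(self, module: nn.Module) -> None:
        init_fn_name = self.init_config['name']
        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=1)

model = TinyModel({'name': 'kaiming_normal_'})
print(model.proj.weight.std())   # non-zero spread confirms the named initializer ran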
jiangjiechen/auction-arena
src/auctioneer_base.py
[ { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = 
self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have insufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsible JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratuations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "HumanBidder", "path": "src/human_bidder.py", "snippet": "class HumanBidder(Bidder):\n name: str\n human_name: str = \"Adam\"\n budget: int\n auction_hash: str\n \n cur_item_id = 0\n items: list = []\n withdraw: bool = False\n \n engagement_count: int = 0\n original_budget: int = 0\n profit: int = 0\n items_won = []\n \n all_bidders_status = {} # track others' profit\n \n # essential for demo\n need_input: bool = False\n semaphore: int = 0 # if needs input, then semaphore is set as 1, else waits.\n input_box: str = None # global variable for accepting user input\n \n # not used\n model_name: str = 'human'\n openai_cost = 0\n desire = ''\n plan_strategy = ''\n correct_belief = True\n \n class Config:\n arbitrary_types_allowed = True\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = \"As {bidder_name}, you have a total budget of ${budget}. 
This auction has a total of {item_num} items to be sequentially presented, they are:\\n{items_info}\".format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items)\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n # Human = auctioneer, AI = bidder\n self.dialogue_history += [\n HumanMessage(content=plan_instruct),\n AIMessage(content='(Getting ready...)')\n ]\n return ''\n \n def get_bid_instruct(self, auctioneer_msg, bid_round):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg), \n AIMessage(content='')\n ]\n return auctioneer_msg\n \n def bid(self, bid_instruct):\n # wait for the cue to handle user input\n while self.semaphore <= 0:\n time.sleep(1)\n \n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=self.input_box)\n ]\n self.semaphore -= 1\n self.need_input = False\n return self.input_box\n \n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct_summarize = f\"{bidding_history}\\n\\n{hammer_msg}\\n{win_lose_msg}\"\n return instruct_summarize\n \n def summarize(self, instruct_summarize: str):\n self.dialogue_history += [\n HumanMessage(content=instruct_summarize),\n AIMessage(content='(Taking notes...)')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n return ''\n \n def get_replan_instruct(self):\n return ''\n\n def replan(self, instruct_replan):\n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n def to_monitors(self, as_json=False):\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'human_name': self.human_name,\n 'model_name': self.model_name,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'engagement_count': self.engagement_count,\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n 0, \n 0, \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n 0, \n 0, \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n [],\n [],\n [], \n []\n ]" }, { "identifier": "Item", "path": "src/item_base.py", "snippet": "class Item():\n def __init__(self, id: int, name: str, price: int, desc: str, true_value: int):\n self.id = id\n self.name = name\n self.price = price\n self.desc = desc\n self.true_value = true_value\n self._original_price = price\n\n def get_desc(self):\n return f\"{self.name}, starting at ${int(self.price)}.\"\n\n def __repr__(self):\n return f\"{self.name}\"\n \n def __str__(self):\n return f\"{self.name}\"\n \n def info(self):\n return f\"{self.name}: ${int(self.price)} to ${self.true_value}.\"\n\n def lower_price(self, percentage: float = 0.2):\n # lower starting price by 20%\n self.price = int(self.price * (1 - percentage))\n \n def reset_price(self):\n self.price = self._original_price" }, { "identifier": "PARSE_BID_INSTRUCTION", "path": "src/prompt_base.py", "snippet": "PARSE_BID_INSTRUCTION = \"\"\"\nYour task is to parse a response from a bidder in an auction, and extract the bidding price from the response. 
Here are the rules:\n- If the language model decides to withdraw from the bidding (e.g., saying \"I'm out!\"), output -1.\n- If a bidding price is mentioned (e.g., saying \"I bid $xxx!\"), output that price number (e.g., $xxx).\nHere is the response:\n\n{response}\n\nDon't say anything else other than just a number: either the bidding price (e.g., $xxx, with $) or -1.\n\"\"\".strip()" } ]
import re
import random
import inflect
from typing import List, Dict
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from pydantic import BaseModel
from collections import defaultdict
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)
from .bidder_base import Bidder
from .human_bidder import HumanBidder
from .item_base import Item
from .prompt_base import PARSE_BID_INSTRUCTION
12,958
msg = f"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\n{bidding_history}\n\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?" return msg def ask_for_rebid(self, fail_msg: str, bid_price: int): return f"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid." def get_hammer_msg(self): if self.highest_bidder is None: return f"Since no one bid on {self.cur_item.name}, we'll move on to the next item." else: return f"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! The true value for {self.cur_item} is ${self.cur_item.true_value}."# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}." def check_hammer(self, bid_round: int): # check if the item is sold self.fail_to_sell = False num_bid = self._num_bids_in_round(bid_round) # highest_bidder has already been updated in record_bid(). # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item if self.highest_bidder is None: if num_bid == 0: # failed to sell, as there is no highest bidder self.fail_to_sell = True if self.enable_discount and bid_round < 3: # lower the starting price by 50%. discoutn only applies to the first 3 rounds self.cur_item.lower_price(0.5) is_sold = False else: is_sold = True else: # won't happen raise ValueError(f"highest_bidder is None but num_bid is {num_bid}") else: if self.prev_round_max_bid < 0 and num_bid == 1: # only one bidder in the first round is_sold = True else: self.prev_round_max_bid = self.highest_bid is_sold = self._num_bids_in_round(bid_round) == 0 return is_sold def _num_bids_in_round(self, bid_round: int): # check if there is no bid in the current round cnt = 0 for hist in self.bidding_history[bid_round]: if hist['bid'] > 0: cnt += 1 return cnt def hammer_fall(self): print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.') self.auction_logs[f"{self.cur_item.get_desc()}"].append({ 'bidder': self.highest_bidder, 'bid': f"{self.highest_bid} (${self.cur_item.true_value})", # no need for the first $, as it will be added in the self.log() 'bid_round': 'Hammer price (true value)'}) self.cur_item = None self.highest_bidder = None self.highest_bid = -1 self.bidding_history = defaultdict(list) self.prev_round_max_bid = -1 self.fail_to_sell = False def end_auction(self): return len(self.items_queue) == 0 def gather_all_status(self, bidders: List[Bidder]): status = {} for bidder in bidders: status[bidder.name] = { 'profit': bidder.profit, 'items_won': bidder.items_won } return status def parse_bid(self, text: str): prompt = PARSE_BID_INSTRUCTION.format(response=text) with get_openai_callback() as cb: llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0) result = llm([HumanMessage(content=prompt)]).content self.openai_cost += cb.total_cost bid_number = re.findall(r'\$?\d+', result.replace(',', '')) # find number in the result if '-1' in result: return -1 elif len(bid_number) > 0: return int(bid_number[-1].replace('$', '')) else: print('* Rebid:', text) return None def log(self, bidder_personal_reports: list = [], show_model_name=True): ''' example Apparatus H, starting at $1000. 
1st bid: Bidder 1 (gpt-3.5-turbo-16k-0613): $1200 Bidder 2 (gpt-3.5-turbo-16k-0613): $1100 Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn Bidder 4 (gpt-3.5-turbo-16k-0613): $1200 2nd bid: Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn Hammer price: Bidder 4 (gpt-3.5-turbo-16k-0613): $1200 ''' markdown_output = "## Auction Log\n\n" for i, (item, bids) in enumerate(self.auction_logs.items()): markdown_output += f"### {i+1}. {item}\n\n" cur_bid_round = -1 for i, bid in enumerate(bids): if bid['bid_round'] != cur_bid_round: cur_bid_round = bid['bid_round'] if isinstance(bid['bid_round'], int): markdown_output += f"\n#### {p.ordinal(bid['bid_round']+1)} bid:\n\n" else: markdown_output += f"\n#### {bid['bid_round']}:\n\n" bid_price = f"${bid['bid']}" if bid['bid'] != -1 else 'Withdrew'
p = inflect.engine() class Auctioneer(BaseModel): enable_discount: bool = False items: List[Item] = [] cur_item: Item = None highest_bidder: Bidder = None highest_bid: int = -1 bidding_history = defaultdict(list) # history about the bidding war of one item items_queue: List[Item] = [] # updates when a item is taken. auction_logs = defaultdict(list) # history about the bidding war of all items openai_cost = 0 prev_round_max_bid: int = -1 min_bid: int = 0 fail_to_sell = False min_markup_pct = 0.1 class Config: arbitrary_types_allowed = True def init_items(self, items: List[Item]): for item in items: # reset discounted price item.reset_price() self.items = items self.items_queue = items.copy() def summarize_items_info(self): desc = '' for item in self.items: desc += f"- {item.get_desc()}\n" return desc.strip() def present_item(self): cur_item = self.items_queue.pop(0) self.cur_item = cur_item return cur_item def shuffle_items(self): random.shuffle(self.items) self.items_queue = self.items.copy() def record_bid(self, bid_info: dict, bid_round: int): ''' Save the bidding history for each round, log the highest bidder and highest bidding ''' # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx} self.bidding_history[bid_round].append(bid_info) for hist in self.bidding_history[bid_round]: if hist['bid'] > 0: if self.highest_bid < hist['bid']: self.highest_bid = hist['bid'] self.highest_bidder = hist['bidder'] elif self.highest_bid == hist['bid']: # random if there's a tie self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']]) self.auction_logs[f"{self.cur_item.get_desc()}"].append( {'bidder': bid_info['bidder'], 'bid': bid_info['bid'], 'bid_round': bid_round}) def _biddings_to_string(self, bid_round: int): ''' Return a string that summarizes the bidding history in a round ''' # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\n' bid_hist_text = '' for js in self.bidding_history[bid_round]: if js['bid'] < 0: bid_hist_text += f"- {js['bidder']} withdrew\n" else: bid_hist_text += f"- {js['bidder']}: ${js['bid']}\n" return bid_hist_text.strip() def all_bidding_history_to_string(self): bid_hist_text = '' for bid_round in self.bidding_history: bid_hist_text += f"Round {bid_round}:\n{self._biddings_to_string(bid_round)}\n\n" return bid_hist_text.strip() def ask_for_bid(self, bid_round: int): ''' Ask for bid, return the message to be sent to bidders ''' if self.highest_bidder is None: if bid_round > 0: msg = f"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?" else: remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue] msg = f"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\n\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?" else: bidding_history = self._biddings_to_string(bid_round - 1) msg = f"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\n{bidding_history}\n\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?" 
return msg def ask_for_rebid(self, fail_msg: str, bid_price: int): return f"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid." def get_hammer_msg(self): if self.highest_bidder is None: return f"Since no one bid on {self.cur_item.name}, we'll move on to the next item." else: return f"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! The true value for {self.cur_item} is ${self.cur_item.true_value}."# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}." def check_hammer(self, bid_round: int): # check if the item is sold self.fail_to_sell = False num_bid = self._num_bids_in_round(bid_round) # highest_bidder has already been updated in record_bid(). # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item if self.highest_bidder is None: if num_bid == 0: # failed to sell, as there is no highest bidder self.fail_to_sell = True if self.enable_discount and bid_round < 3: # lower the starting price by 50%. discoutn only applies to the first 3 rounds self.cur_item.lower_price(0.5) is_sold = False else: is_sold = True else: # won't happen raise ValueError(f"highest_bidder is None but num_bid is {num_bid}") else: if self.prev_round_max_bid < 0 and num_bid == 1: # only one bidder in the first round is_sold = True else: self.prev_round_max_bid = self.highest_bid is_sold = self._num_bids_in_round(bid_round) == 0 return is_sold def _num_bids_in_round(self, bid_round: int): # check if there is no bid in the current round cnt = 0 for hist in self.bidding_history[bid_round]: if hist['bid'] > 0: cnt += 1 return cnt def hammer_fall(self): print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.') self.auction_logs[f"{self.cur_item.get_desc()}"].append({ 'bidder': self.highest_bidder, 'bid': f"{self.highest_bid} (${self.cur_item.true_value})", # no need for the first $, as it will be added in the self.log() 'bid_round': 'Hammer price (true value)'}) self.cur_item = None self.highest_bidder = None self.highest_bid = -1 self.bidding_history = defaultdict(list) self.prev_round_max_bid = -1 self.fail_to_sell = False def end_auction(self): return len(self.items_queue) == 0 def gather_all_status(self, bidders: List[Bidder]): status = {} for bidder in bidders: status[bidder.name] = { 'profit': bidder.profit, 'items_won': bidder.items_won } return status def parse_bid(self, text: str): prompt = PARSE_BID_INSTRUCTION.format(response=text) with get_openai_callback() as cb: llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0) result = llm([HumanMessage(content=prompt)]).content self.openai_cost += cb.total_cost bid_number = re.findall(r'\$?\d+', result.replace(',', '')) # find number in the result if '-1' in result: return -1 elif len(bid_number) > 0: return int(bid_number[-1].replace('$', '')) else: print('* Rebid:', text) return None def log(self, bidder_personal_reports: list = [], show_model_name=True): ''' example Apparatus H, starting at $1000. 1st bid: Bidder 1 (gpt-3.5-turbo-16k-0613): $1200 Bidder 2 (gpt-3.5-turbo-16k-0613): $1100 Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn Bidder 4 (gpt-3.5-turbo-16k-0613): $1200 2nd bid: Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn Hammer price: Bidder 4 (gpt-3.5-turbo-16k-0613): $1200 ''' markdown_output = "## Auction Log\n\n" for i, (item, bids) in enumerate(self.auction_logs.items()): markdown_output += f"### {i+1}. 
{item}\n\n" cur_bid_round = -1 for i, bid in enumerate(bids): if bid['bid_round'] != cur_bid_round: cur_bid_round = bid['bid_round'] if isinstance(bid['bid_round'], int): markdown_output += f"\n#### {p.ordinal(bid['bid_round']+1)} bid:\n\n" else: markdown_output += f"\n#### {bid['bid_round']}:\n\n" bid_price = f"${bid['bid']}" if bid['bid'] != -1 else 'Withdrew'
if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):
1
2023-10-08 09:30:57+00:00
16k
SH1ROd/Bert-VITS2-Integration-train-txt-infer
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(hparams, \"use_mel_posterior_encoder\", False)\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:\n audiopath = f'{_id}'\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", 
\".spec.pt\")), \n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(audio_norm, self.filter_length,\n self.n_mel_channels, self.sampling_rate, self.hop_length,\n self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)\n else:\n spec = spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n # print(text, word2ph,phone, tone, language_str)\n pold = phone\n w2pho = [i for i in word2ph]\n word2ph = [i for i in word2ph]\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n pold2 = phone\n\n if self.add_blank:\n p1 = len(phone)\n phone = commons.intersperse(phone, 0)\n p2 = len(phone)\n t1 = len(tone)\n tone = commons.intersperse(tone, 0)\n t2 = len(tone)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str)\n torch.save(bert, bert_path)\n #print(bert.shape[-1], bert_path, text, pold)\n assert bert.shape[-1] == len(phone)\n\n assert bert.shape[-1] == len(phone), (\n bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = 
batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, :tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, :language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, :bert.size(1)] = bert\n\n return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if (len_bucket == 0):\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * 
self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer = 4,\n n_layers_trans_flow = 3,\n flow_share_parameter = False,\n use_transformer_flow = True,\n **kwargs):\n\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\"use_spk_conditioned_encoder\", True)\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)\n else:\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)\n self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)\n self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)\n \n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n 
def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]\n neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),\n s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n \n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=g)\n return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)\n \n def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):\n #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,\n 2) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, 
use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): #vits2\n def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(filter_channels, 1), \n nn.Sigmoid() \n )\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n 
g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 " }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import os
import json
import argparse
import itertools
import math
import torch
import shutil
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import commons
import utils
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
10,870
_, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), 
hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
logging.getLogger('numba').setLevel(logging.WARNING) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision('medium') global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '65280' hps = utils.get_hparams() role='' for t in hps.data.spk2id.items(): role=t[0] if not hps.cont: folder_path = f"./logs/{role}" if not os.path.exists(folder_path): os.makedirs(folder_path) print(f"文件夹 '{role}' 已创建在 './logs/' 目录下。") else: print(f"文件夹 '{role}' 已经存在于 './logs/' 目录下。") shutil.copy('./pretrained_models/D_0.pth',f'./logs/{role}/D_0.pth') shutil.copy('./pretrained_models/G_0.pth',f'./logs/{role}/G_0.pth') shutil.copy('./pretrained_models/DUR_0.pth',f'./logs/{role}/DUR_0.pth') mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps, role)) def run(rank, n_gpus, hps, role): global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn) if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: print("Using duration discriminator for VITS2") use_duration_discriminator = True net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: if hps.data.n_speakers == 0: raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial = mas_noise_scale_initial, noise_scale_delta = noise_scale_delta, **hps.model).cuda(rank) freeze_enc = getattr(hps.model, "freeze_enc", False) if freeze_enc: print("freeze 
encoder !!!") for param in net_g.enc_p.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) if net_dur_disc is not None: net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) pretrain_dir = None if pretrain_dir is None: try: if net_dur_disc is not None: _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = 
net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
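The training fragment above interleaves a discriminator update and a generator update inside torch.cuda.amp autocast, with a single GradScaler handling loss scaling, unscaling before gradient clipping, and the optimizer steps. The sketch below illustrates only that update pattern with stand-in linear modules and a made-up least-squares loss; none of the names are taken from the record above.

# Minimal sketch of the alternating AMP update pattern, with placeholder modules.
import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler, autocast

device = "cuda" if torch.cuda.is_available() else "cpu"
use_amp = device == "cuda"
gen = nn.Linear(16, 16).to(device)     # stand-in for the generator
disc = nn.Linear(16, 1).to(device)     # stand-in for the discriminator
opt_g = torch.optim.AdamW(gen.parameters(), lr=2e-4, betas=(0.8, 0.99), eps=1e-9)
opt_d = torch.optim.AdamW(disc.parameters(), lr=2e-4, betas=(0.8, 0.99), eps=1e-9)
scaler = GradScaler(enabled=use_amp)

x = torch.randn(4, 16, device=device)
real = torch.randn(4, 16, device=device)

# Discriminator step: the generator output is detached so only disc gets gradients.
with autocast(enabled=use_amp):
    fake = gen(x)
    loss_d = ((disc(real) - 1) ** 2).mean() + (disc(fake.detach()) ** 2).mean()
opt_d.zero_grad()
scaler.scale(loss_d).backward()
scaler.unscale_(opt_d)                                   # unscale before clipping
torch.nn.utils.clip_grad_value_(disc.parameters(), 1.0)
scaler.step(opt_d)

# Generator step: re-run the discriminator on the non-detached output.
with autocast(enabled=use_amp):
    loss_g = ((disc(gen(x)) - 1) ** 2).mean()
opt_g.zero_grad()
scaler.scale(loss_g).backward()
scaler.unscale_(opt_g)
torch.nn.utils.clip_grad_value_(gen.parameters(), 1.0)
scaler.step(opt_g)
scaler.update()                                          # one scale update per iteration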
9
2023-10-10 02:23:23+00:00
16k
sakemin/cog-musicgen-chord
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
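Among the snippets quoted in the context above are the length_to_mask and hash_trick utilities from audiocraft.utils.utils. The short sketch below re-implements just those two helpers, following the quoted definitions, so their behaviour can be checked standalone; the printed values are only what those definitions imply.

# Usage sketch of the length_to_mask / hash_trick helpers quoted above.
import hashlib
import torch

def length_to_mask(lengths: torch.Tensor, max_len=None) -> torch.Tensor:
    # [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
    final_length = lengths.max().item() if not max_len else max_len
    final_length = max(final_length, 1)
    return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]

def hash_trick(word: str, vocab_size: int) -> int:
    # deterministic bucket index in [0, vocab_size)
    h = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
    return h % vocab_size

print(length_to_mask(torch.tensor([3, 5])).int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 1]], dtype=torch.int32)
print(hash_trick("Jeff Buckley", 1000))   # same index every run for the same string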
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer  # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap  # type: ignore
13,447
entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. 
This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None:
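The cropped code above ends inside ChromaStemConditioner.__init__; just before it, WaveformConditioner.forward shows the masking convention: per-sample waveform lengths are divided by the embedding model's downsampling factor and turned into a frame-level mask that zeroes padded positions of the projected embedding. The sketch below replays that logic with made-up dimensions (the 640-sample hop and the tensor shapes are arbitrary placeholders, not values from the record).

# Sketch of the length -> frame-mask logic used by WaveformConditioner.forward.
import torch
import torch.nn as nn

B, T_wav, dim, output_dim = 2, 32000, 12, 64
downsampling_factor = 640                      # hypothetical hop of the embedder
T_emb = T_wav // downsampling_factor           # 50 embedding frames

embeds = torch.randn(B, T_emb, dim)            # stand-in for _get_wav_embedding(x)
lengths = torch.tensor([32000, 16000])         # second sample is half padding

output_proj = nn.Linear(dim, output_dim)
embeds = output_proj(embeds)

frame_lengths = lengths / downsampling_factor
mask = (torch.arange(T_emb)[None, :] < frame_lengths[:, None]).int()
embeds = embeds * mask.unsqueeze(-1)           # padded frames are zeroed

print(mask.sum(dim=1))   # tensor([50, 25]) -> number of valid frames per sample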
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None:
self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device,
10
2023-10-09 09:52:24+00:00
16k
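Each conditioner in the snippet above ends its forward pass with the same masking idiom, embeds * mask.unsqueeze(-1), which zeroes the embeddings at padded positions before they are returned with the mask. The following standalone sketch illustrates just that step; the tensor shapes, vocabulary size, and padding convention are illustrative assumptions rather than values taken from the snippet.

import torch
import torch.nn as nn

# Hypothetical batch: 2 sequences padded to length 4, embedding dim 8.
tokens = torch.tensor([[5, 3, 0, 0], [7, 2, 9, 0]])  # 0 is the assumed padding index
mask = (tokens != 0).int()                            # [B, T] validity mask

embed = nn.Embedding(num_embeddings=16, embedding_dim=8, padding_idx=0)
output_proj = nn.Linear(8, 8)

embeds = output_proj(embed(tokens))   # [B, T, D]
# Broadcast the mask over the embedding dimension so padded positions become zero,
# mirroring the `embeds * mask.unsqueeze(-1)` line in the conditioners above.
embeds = embeds * mask.unsqueeze(-1)
print(embeds.shape, mask.shape)       # torch.Size([2, 4, 8]) torch.Size([2, 4])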
RVC-Project/Retrieval-based-Voice-Conversion
rvc/modules/vc/modules.py
[ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __new__(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n self.device: str = \"cuda:0\"\n self.is_half: bool = True\n self.use_jit: bool = False\n self.n_cpu: int = cpu_count()\n self.gpu_name: str | None = None\n self.json_config = self.load_config_json()\n self.gpu_mem: int | None = None\n self.instead: str | None = None\n (\n self.python_cmd,\n self.listen_port,\n self.noparallel,\n self.noautoopen,\n self.dml,\n ) = self.arg_parse()\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n return {\n config_file: json.load(open(config_file, \"r\"))\n for config_file in version_config_list\n }\n\n @staticmethod\n def arg_parse() -> tuple:\n parser: argparse.ArgumentParser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=7865, help=\"Listen port\")\n parser.add_argument(\n \"--pycmd\",\n type=str,\n default=sys.executable or \"python\",\n help=\"Python command\",\n )\n parser.add_argument(\n \"--noparallel\", action=\"store_true\", help=\"Disable parallel processing\"\n )\n parser.add_argument(\n \"--noautoopen\",\n action=\"store_true\",\n help=\"Do not open in browser automatically\",\n )\n parser.add_argument(\n \"--dml\",\n action=\"store_true\",\n help=\"torch_dml\",\n )\n cmd_opts: argparse.Namespace = parser.parse_args()\n\n cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865\n\n return (\n cmd_opts.pycmd,\n cmd_opts.port,\n cmd_opts.noparallel,\n cmd_opts.noautoopen,\n cmd_opts.dml,\n )\n\n @staticmethod\n def has_mps() -> bool:\n return torch.backends.mps.is_available() and not torch.zeros(1).to(\n torch.device(\"mps\")\n )\n\n @staticmethod\n def has_xpu() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n def use_fp32_config(self) -> None:\n for config_file, data in self.json_config.items():\n try:\n data[\"train\"][\"fp16_run\"] = False\n with open(config_file, \"w\") as json_file:\n json.dump(data, json_file, indent=4)\n except Exception as e:\n logger.info(f\"Error updating {config_file}: {str(e)}\")\n logger.info(\"overwrite configs.json\")\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n logger.info(f\"Found GPU {self.gpu_name}, force to fp32\")\n self.is_half = False\n self.use_fp32_config()\n else:\n logger.info(f\"Found GPU {self.gpu_name}\")\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n elif self.has_mps():\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n elif self.dml:\n import torch_directml\n\n self.device = torch_directml.device(torch_directml.default_device())\n self.is_half = False\n else:\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if 
self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n elif self.is_half:\n # 6G PU_RAM conf\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n # 5G GPU_RAM conf\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n logger.info(f\"Use {self.dml or self.instead} instead\")\n logger.info(f\"is_half:{self.is_half}, device:{self.device}\")\n return x_pad, x_query, x_center, x_max" }, { "identifier": "load_audio", "path": "rvc/lib/audio.py", "snippet": "def load_audio(file, sr):\r\n if not os.path.exists(file):\r\n raise RuntimeError(\r\n \"You input a wrong audio path that does not exists, please fix it!\"\r\n )\r\n try:\r\n with open(file, \"rb\") as f:\r\n with BytesIO() as out:\r\n audio2(f, out, \"f32le\", sr)\r\n return np.frombuffer(out.getvalue(), np.float32).flatten()\r\n\r\n except AttributeError:\r\n audio = file[1] / 32768.0\r\n if len(audio.shape) == 2:\r\n audio = np.mean(audio, -1)\r\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\r\n\r\n except Exception:\r\n raise RuntimeError(traceback.format_exc())\r" }, { "identifier": "wav2", "path": "rvc/lib/audio.py", "snippet": "def wav2(i, o, format):\r\n inp = av.open(i, \"rb\")\r\n if format == \"m4a\":\r\n format = \"mp4\"\r\n out = av.open(o, \"wb\", format=format)\r\n if format == \"ogg\":\r\n format = \"libvorbis\"\r\n if format == \"mp4\":\r\n format = \"aac\"\r\n\r\n ostream = out.add_stream(format)\r\n\r\n for frame in inp.decode(audio=0):\r\n for p in ostream.encode(frame):\r\n out.mux(p)\r\n\r\n for p in ostream.encode(None):\r\n out.mux(p)\r\n\r\n out.close()\r\n inp.close()\r" }, { "identifier": "SynthesizerTrnMs256NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, 
hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n pitchf: torch.Tensor,\n y: torch.Tensor,\n y_lengths: torch.Tensor,\n ds: Optional[torch.Tensor] = None,\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n assert isinstance(rate, torch.Tensor)\n head = int(z_p.shape[2] * (1 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n 
):\n super(SynthesizerTrnMs256NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * 
torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n 
torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", 
self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "Pipeline", "path": "rvc/modules/vc/pipeline.py", "snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len 
- len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model,%s\" % \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"]\n )\n self.model_rmvpe = RMVPE(\n \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"],\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n 
weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[\"npy\"] += t1 - t0\n times[\"infer\"] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index\n and file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = 
pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[\"f0\"] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
import logging
import os
import traceback

import numpy as np
import soundfile as sf
import torch
from collections import OrderedDict
from io import BytesIO
from pathlib import Path

from rvc.configs.config import Config
from rvc.lib.audio import load_audio, wav2
from rvc.lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from rvc.modules.vc.pipeline import Pipeline
from rvc.modules.vc.utils import *
11,768
logger: logging.Logger = logging.getLogger(__name__)


class VC:
    def __init__(self):
        self.n_spk: any = None
        self.tgt_sr: int | None = None
        self.net_g = None
logger: logging.Logger = logging.getLogger(__name__)


class VC:
    def __init__(self):
        self.n_spk: any = None
        self.tgt_sr: int | None = None
        self.net_g = None
self.pipeline: Pipeline | None = None
7
2023-10-14 09:52:31+00:00
16k
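The get_f0 method embedded in the record above converts pitch to a mel scale and quantizes it onto 255 coarse bins before inference. Below is a minimal standalone sketch of that quantization step; the f0_min/f0_max bounds follow the snippet, while the input pitch track is a made-up example.

import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

# Hypothetical pitch track in Hz; 0.0 marks an unvoiced frame.
f0 = np.array([0.0, 110.0, 220.0, 440.0, 880.0])

f0_mel = 1127 * np.log(1 + f0 / 700)
# Map voiced frames onto bins 1..255; unvoiced frames collapse to bin 1.
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
f0_coarse = np.rint(f0_mel).astype(np.int32)
print(f0_coarse)  # coarse pitch bins corresponding to the input f0 values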
zhijie-group/LOVECon
video_diffusion/pipelines/stable_diffusion_controlnet.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ControlNetPseudo3DModel", "path": "video_diffusion/models/controlnet_3d_condition.py", "snippet": "class ControlNetPseudo3DModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n **kwargs\n ):\n super().__init__()\n\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n # self.conv_in = PseudoConv3d(\n # in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n # )\n self.conv_in = InflatedConv3d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetPseudo3DConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n #non temperal \n # kwargs_copy=copy.deepcopy(kwargs)\n # temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n # and (not is_final_block))\n # kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n 
cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n # model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n # controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n # model_config=kwargs\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetPseudo3DOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n \n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb)\n\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n # print(sample.shape,controlnet_cond.shape)\n sample += controlnet_cond\n \n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n\n # 5. 
Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetPseudo3DOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, control_temporal_idx=None, control_mid_temporal=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\"\n ]\n # config[\"control_temporal_idx\"] = control_temporal_idx\n # config[\"control_mid_temporal\"] = control_mid_temporal\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n\n state_dict = torch.load(model_file, map_location=\"cpu\")\n for k, v in model.state_dict().items():\n if '_temp.' 
in k:\n if 'conv' in k:\n state_dict.update({k: v})\n else:\n copyk = k\n copyk = copyk.replace('_temp.', '1.')\n state_dict.update({k: state_dict[copyk]})\n model.load_state_dict(state_dict)\n\n return model\n\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n \n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "attention_util", "path": "video_diffusion/prompt_attention/attention_util.py", "snippet": "class EmptyControl:\nclass AttentionControlEdit(AttentionStore, abc.ABC):\nclass AttentionReplace(AttentionControlEdit):\nclass AttentionRefine(AttentionControlEdit):\nclass AttentionReweight(AttentionControlEdit):\n def step_callback(self, x_t):\n def between_steps(self):\n def __call__(self, attn, is_cross: bool, place_in_unet: str):\n def step_callback(self, x_t):\n def replace_self_attention(self, attn_base, att_replace, reshaped_mask=None):\n def replace_cross_attention(self, attn_base, att_replace):\n def update_attention_position_dict(self, current_attention_key):\n def forward(self, attn, is_cross: bool, place_in_unet: str):\n def between_steps(self):\n def __init__(self, prompts, num_steps: int,\n cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],\n self_replace_steps: Union[float, Tuple[float, float]],\n latent_blend: Optional[SpatialBlender], tokenizer=None, \n additional_attention_store: AttentionStore =None,\n use_inversion_attention: bool=False,\n attention_blend: SpatialBlender= None,\n save_self_attention: bool=True,\n disk_store=False\n ):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n latent_blend: Optional[SpatialBlender] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n 
save_self_attention: bool = True,\n disk_store=False):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n latent_blend: Optional[SpatialBlender] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n save_self_attention : bool=True,\n disk_store = False\n ):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,\n latent_blend: Optional[SpatialBlender] = None, controller: Optional[AttentionControlEdit] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n save_self_attention:bool = True,\n disk_store = False\n ):\ndef get_equalizer(text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float],\n Tuple[float, ...]], tokenizer=None):\ndef make_controller(tokenizer, prompts: List[str], is_replace_controller: bool,\n cross_replace_steps: Dict[str, float], self_replace_steps: float=0.0, \n blend_words=None, equilizer_params=None, \n additional_attention_store=None, use_inversion_attention = False, blend_th: float=(0.3, 0.3),\n NUM_DDIM_STEPS=None,\n blend_latents = False,\n blend_self_attention=False,\n save_path = None,\n save_self_attention = True,\n disk_store = False\n ) -> AttentionControlEdit:" } ]
import inspect import os, sys import PIL import torch import numpy as np import json import diffusers import bitsandbytes from dataclasses import dataclass from typing import Callable, List, Optional, Union,Dict,Any from einops import rearrange from tqdm import trange, tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ..models.unet_3d_condition import UNetPseudo3DConditionModel from ..models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.prompt_attention import attention_util from accelerate import cpu_offload
11,286
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetPseudo3DConditionModel,
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetPseudo3DConditionModel,
controlnet : ControlNetPseudo3DModel,
1
2023-10-09 14:38:28+00:00
16k
mlpc-ucsd/MaskCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "maskclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # add MaskCLIP configs\n cfg.MODEL.CLIP_MODEL = CN()\n cfg.MODEL.CLIP_MODEL.NAME = 'ViT-L/14@336px'\n cfg.MODEL.CLIP_MODEL.INPUT_RESOLUTION = 336\n cfg.MODEL.CLIP_MODEL.PATCH_SIZE = 14\n cfg.MODEL.CLIP_MODEL.WIDTH = 1024\n cfg.MODEL.CLIP_MODEL.LAYERS = 24\n cfg.MODEL.CLIP_MODEL.HEADS = 16\n cfg.MODEL.CLIP_MODEL.OUTPUT_DIM = 768\n\n cfg.MODEL.CLIP_MODEL.TEMPERATURE = 0.01" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n 
utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop_CategoryAreaConstraint(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n # cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n 
dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "maskclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "maskclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from maskclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11,476
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
mapper = MaskFormerPanopticDatasetMapper(cfg, True)
4
2023-10-13 02:32:25+00:00
16k
mlpc-ucsd/MasQCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "masqclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "add_masqclip_config", "path": "masqclip/config.py", "snippet": "def add_masqclip_config(cfg):\n \"\"\"\n Add config for MasQCLIP.\n \"\"\"\n cfg.MODEL.MASQ_CLIP = CN()\n cfg.MODEL.MASQ_CLIP.MODEL_NAME = [\"ViT-L/14@336px\"]\n \n cfg.MODEL.MASQ_CLIP.SCORE_THRESHOLD = 0.8\n cfg.MODEL.MASQ_CLIP.NMS_THRESHOLD = 0.1" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # change_code_note\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], 
format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = 
utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "masqclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "masqclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from masqclip import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
    add_masqclip_config,
)
12,249
@classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, 
cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
model = SemanticSegmentorWithTTA(cfg, model)
7
2023-10-13 02:43:53+00:00
16k
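The rows above each pair a file's import block and a truncated code window with the single source line that should come next (for example, model = SemanticSegmentorWithTTA(cfg, model) in the second row). Below is a minimal sketch, not taken from the dataset's own tooling, of how such a row could be assembled into a next-line-completion prompt and scored by exact match; the field names import_statement, cropped_code, and next_line are assumptions chosen to mirror the columns shown here.

def build_prompt(example: dict) -> str:
    # Concatenate the import block and the cropped code window into the
    # left context handed to a completion model. Field names are assumed.
    return example["import_statement"].rstrip() + "\n" + example["cropped_code"]


def is_exact_match(example: dict, prediction: str) -> bool:
    # Score a predicted line against the reference next line, ignoring
    # leading/trailing whitespace.
    return prediction.strip() == example["next_line"].strip()


if __name__ == "__main__":
    # Toy row loosely mirroring the second example above (bodies abbreviated).
    row = {
        "import_statement": "from masqclip import SemanticSegmentorWithTTA",
        "cropped_code": '    logger.info("Running inference with test-time augmentation ...")',
        "next_line": "model = SemanticSegmentorWithTTA(cfg, model)",
    }
    prompt = build_prompt(row)
    print(is_exact_match(row, "model = SemanticSegmentorWithTTA(cfg, model)"))  # True

Whitespace-insensitive exact match is only one plausible way to score next_line; the dump itself does not state how the target line is meant to be evaluated.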